diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..20ab9caff6b371895a0c16700cfd79413b92ebf2 --- /dev/null +++ b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e34dde773c9a78fd0f839b9560dab3b6957ca7ed78513e86d75d8a5bb7cb63a9 +size 33555533 diff --git a/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..fede13ebbb1fc7c0b063c1b0eedecfc5f2c859c6 --- /dev/null +++ b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949e9a6e5d9bf691cc6df3e867ec014eca39783a3c03eafa619cbe639962e2bc +size 50332828 diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..29e447d578ed11f77d962c079e9db9e3f415d801 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_1dc": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_1dc": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..12a4ebe1d2a83e1a8d5dc85ade8913f31931d8b6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-loglikelihood @@ -0,0 +1 @@ +ddb24ddfaebe076b3aa7107937d71bf5f4503a78283bc889e39200368603681e \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..cf08b036b9eccc0d0151cb41a6ec0c4eeede2f91 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_distractor_agreement_relative_clause": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_distractor_agreement_relative_clause": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_drop_argument-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_drop_argument-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..853a4d2f92c5c6da8d146a85e120a32dca147c4c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_drop_argument-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_drop_argument": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_drop_argument": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b8500d68b553a66f850ebc39192644c2d138f0a1 --- /dev/null 
+++ b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_existential_there_quantifiers_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_quantifiers_2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..0e7d8db1e2ad279ed4bfcc094253f1fa7723b6ce --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_principle_A_reconstruction": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_reconstruction": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b69d445f3c257608fd5be46aa74bd53cd598042c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_superlative_quantifiers_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_superlative_quantifiers_1": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..3900f561993a333909d46e7a4fc18906c9b69721 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-loglikelihood @@ -0,0 +1 @@ +f145ad5086da0bf8c76f0730258529fa243efe32b7ab792d3c4716284b4b5495 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..f9dd321f7f3c9525491145df99fb4f7658be8065 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-res.json @@ -0,0 +1 @@ +{"results": {"crows_pairs_french_nationality": {"likelihood_difference": 0.33534193269044926, "likelihood_difference_stderr": 0.01429836309463257, "pct_stereotype": 0.4743083003952569, "pct_stereotype_stderr": 0.031455431847992904}}, "versions": {"crows_pairs_french_nationality": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..0c01f548806c747150690d942f7def8b2d98f2a2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-loglikelihood @@ -0,0 +1 @@ +88872f1ed1b203f9649a4ced4fb4627d18c17af455d713de6e17c05eced4ec60 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/headqa-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/headqa-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..9129d834b6037cda3db655064d6c18bb3dccfb54 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/headqa-v0-loglikelihood @@ -0,0 +1 @@ 
+767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b5cea9cbe310db37d488984f3ff6aa57921576d9 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-high_school_european_history": {"acc": 0.23636363636363636, "acc_norm": 0.24242424242424243, "acc_norm_stderr": 0.03346409881055953, "acc_stderr": 0.033175059300091805}}, "versions": {"hendrycksTest-high_school_european_history": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_microeconomics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_microeconomics-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..cf698d181c95ef88e774204df6f92622116d690c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_microeconomics-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-high_school_microeconomics": {"acc": 0.24369747899159663, "acc_norm": 0.22268907563025211, "acc_norm_stderr": 0.027025433498882378, "acc_stderr": 0.027886828078380558}}, "versions": {"hendrycksTest-high_school_microeconomics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-machine_learning-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-machine_learning-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..53e498ddd480dfaf3994eba4069ead8a28694784 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-machine_learning-v0-loglikelihood @@ -0,0 +1 @@ +7a7138821a66ef946e427b40344cf7f1a916a2926995a85ef731a3bee40cb7ce \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_accounting-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_accounting-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..fe5997427ef5df8be6d52709189b7baa8a410df9 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_accounting-v0-loglikelihood @@ -0,0 +1 @@ +847418f7b22cd9b499e95fd73c40a2fbc40076895280cc2c560199c0c4c4f433 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..ae19de0e6951bd90cd1e713d14816767496044e8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-loglikelihood @@ -0,0 +1 @@ +5ad125e1708499832b2cee8c3388f89f9c0277010fd96fbd3359039ce8105984 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..12f5d349ebd170ee5295656bc3907f872453eca6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-res.json @@ -0,0 +1 @@ +{"results": {"lambada_openai_mt_de": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 
0.006497321146240192}}, "versions": {"lambada_openai_mt_de": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mnli-v0-res.json b/lm-evaluation-harness/tests/testdata/mnli-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..d9dada7a0244534c35d86efb71a03fbd90217328 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mnli-v0-res.json @@ -0,0 +1 @@ +{"results": {"mnli": {"acc": 0.32868059093224655, "acc_stderr": 0.004741640290753859}}, "versions": {"mnli": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/multirc-v0-res.json b/lm-evaluation-harness/tests/testdata/multirc-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..87e9c532eb7d7deb7d08635dd955df7a68ab9813 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/multirc-v0-res.json @@ -0,0 +1 @@ +{"results": {"multirc": {"acc": 0.07450157397691501, "acc_stderr": 0.008510441526175931}}, "versions": {"multirc": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_arxiv-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_arxiv-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..d19d0c6fee7f47af1ad3f5af9ff1d7a1544e2e98 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_arxiv-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_arxiv": {"bits_per_byte": 1.0750412350569374e-05, "byte_perplexity": 1.0000107504701365, "word_perplexity": 1.0000819333090385}}, "versions": {"pile_arxiv": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_gutenberg-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_gutenberg-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..bd7b15927f717baab5b7ce2e9d659dda6d681769 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_gutenberg-v0-loglikelihood_rolling @@ -0,0 +1 @@ +02a559f74a9105145e7d4d9c5ddea372b5b4938f5368dc8ffafc39cbe3b4c7ef \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..5f76588a813eebe7f0958a07253480d30de2ccf3 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling @@ -0,0 +1 @@ +520ea6e04e8a39dc0b5f63a837429a78a40e63d39d109096101feb8c5b2cf8d8 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ca433e3c854780d034839c8e4d029cb6b5bfca1a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_openwebtext2": {"bits_per_byte": 0.000184802319359215, "byte_perplexity": 1.000128103411166, "word_perplexity": 1.0007951516532847}}, "versions": {"pile_openwebtext2": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/rte-v0-res.json b/lm-evaluation-harness/tests/testdata/rte-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..10314dd047e4d7202c755fe8cfc55bc9b1edd5f8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/rte-v0-res.json @@ -0,0 +1 @@ +{"results": {"rte": {"acc": 0.5379061371841155, "acc_stderr": 0.030009848912529117}}, 
"versions": {"rte": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/squad2-v0-greedy_until b/lm-evaluation-harness/tests/testdata/squad2-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..024652e0a39ed0298f8f6f67453f644a68f3a367 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/squad2-v0-greedy_until @@ -0,0 +1 @@ +b261e8885c11750ce6911bb11e8693de03d53758297c26fb14cfc1ef508862cb \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt14-fr-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt14-fr-en-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..5261876f55a69dcaf33b3842690f81c12eb42f3a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt14-fr-en-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt14-fr-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01275083169440515, "chrf_stderr": 8.45474998563806e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt14-fr-en": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..fbcac1b7e3887c6ffa8fd6da6e21595fb0c49a4f --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-greedy_until @@ -0,0 +1 @@ +d1b7c50751b0d5d7470b7f49f2bab9d09792c91460fc92cc34f06617013d7c65 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-cs-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-cs-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..d14fc4939aecb7bb40458c34954c1242d9f20501 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-cs-v0-greedy_until @@ -0,0 +1 @@ +5a34e6863bf6965afd31653de50bac5fecf58db65dbaba46921504a2b7463786 \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc0a57e8324f3025c96fad65f18fc59de6fa56c --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py @@ -0,0 +1,88 @@ +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_chinese_clip": [ + "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ChineseCLIPConfig", + "ChineseCLIPOnnxConfig", + "ChineseCLIPTextConfig", + "ChineseCLIPVisionConfig", + ], + "processing_chinese_clip": ["ChineseCLIPProcessor"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"] + _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_chinese_clip"] = [ + "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ChineseCLIPModel", + "ChineseCLIPPreTrainedModel", + "ChineseCLIPTextModel", + "ChineseCLIPVisionModel", + ] + +if TYPE_CHECKING: + from .configuration_chinese_clip import ( + CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ChineseCLIPConfig, + ChineseCLIPOnnxConfig, + ChineseCLIPTextConfig, + ChineseCLIPVisionConfig, + ) + from .processing_chinese_clip import ChineseCLIPProcessor + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_chinese_clip import ( + CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + ChineseCLIPModel, + ChineseCLIPPreTrainedModel, + ChineseCLIPTextModel, + ChineseCLIPVisionModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db3629778ae9f9ed8813a32fc6ca4bd411060a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1645746e061308fb458be0cd79207739ac7c117f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5470fb88bf5244603b6eeac94405c1c6c198ec8f Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8515a6b5a5f1f04958136affd5cc2ca7d0cc3f9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e4821d148aa4c282dd15207041ea4e144dc9c35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07cd00b6b85542a964cb51853c8fe2b47d4ffbc2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..672f5c1e2328b7abb04e5f62522f841890506902 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..349833d1f2c33598134933521958b89ff97cc1fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -0,0 +1,468 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Chinese-CLIP model configuration""" + +import os +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union + + +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class ChineseCLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a + Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Chinese CLIP + [OFA-Sys/chinese-clip-vit-base-patch16](https: + //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented + by the `inputs_ids` passed when calling [`ChineseCLIPModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. 
For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + + Example: + + ```python + >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel + + >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPTextConfig() + + >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "chinese_clip_text_model" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + initializer_factor=1.0, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from ChineseCLIPConfig + if config_dict.get("model_type") == "chinese_clip": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ChineseCLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an + ChineseCLIP model according to the specified arguments, defining the model architecture. 
Instantiating a + configuration with the defaults will yield a similar configuration to that of the ChineseCLIP + [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 32): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). 
+ Example: + ```python + >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel + + >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPVisionConfig() + + >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "chinese_clip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + projection_dim=512, + num_hidden_layers=12, + num_attention_heads=12, + num_channels=3, + image_size=224, + patch_size=32, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from ChineseCLIPConfig + if config_dict.get("model_type") == "chinese_clip": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ChineseCLIPConfig(PretrainedConfig): + r""" + [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used + to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model + configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the + Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`]. + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`]. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The inital value of the *logit_scale* paramter. Default is used as per the original ChineseCLIP + implementation. 
+ kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel + + >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> configuration = ChineseCLIPConfig() + + >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration + >>> model = ChineseCLIPModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig + + >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration + >>> config_text = ChineseCLIPTextConfig() + >>> config_vision = ChineseCLIPVisionConfig() + + >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision) + ```""" + + model_type = "chinese_clip" + + def __init__( + self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs + ): + # If `_config_dict` exist, we use them for the backward compatibility. + # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot + # of confusion!). + text_config_dict = kwargs.pop("text_config_dict", None) + vision_config_dict = kwargs.pop("vision_config_dict", None) + + super().__init__(**kwargs) + + # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in + # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most + # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. + if text_config_dict is not None: + if text_config is None: + text_config = {} + + # This is the complete result when using `text_config_dict`. + _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict() + + # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. + for key, value in _text_config_dict.items(): + if key in text_config and value != text_config[key] and key not in ["transformers_version"]: + # If specified in `text_config_dict` + if key in text_config_dict: + message = ( + f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " + f'The value `text_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. " + f'The value `text_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `text_config` with the ones in `_text_config_dict`. + text_config.update(_text_config_dict) + + if vision_config_dict is not None: + if vision_config is None: + vision_config = {} + + # This is the complete result when using `vision_config_dict`. + _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict() + # convert keys to string instead of integer + if "id2label" in _vision_config_dict: + _vision_config_dict["id2label"] = { + str(key): value for key, value in _vision_config_dict["id2label"].items() + } + + # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
+ for key, value in _vision_config_dict.items(): + if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: + # If specified in `vision_config_dict` + if key in vision_config_dict: + message = ( + f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " + f'values. The value `vision_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`vision_config_dict` is provided which will be used to initialize " + f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `vision_config` with the ones in `_vision_config_dict`. + vision_config.update(_vision_config_dict) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.") + + if vision_config is None: + vision_config = {} + logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.") + + self.text_config = ChineseCLIPTextConfig(**text_config) + self.vision_config = ChineseCLIPVisionConfig(**vision_config) + + self.projection_dim = projection_dim + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = 1.0 + self.initializer_range = 0.02 + + @classmethod + def from_text_vision_configs( + cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs + ): + r""" + Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and + Chinese-CLIP vision model configuration. Returns: + [`ChineseCLIPConfig`]: An instance of a configuration object + """ + + return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +class ChineseCLIPOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.image_processor, batch_size=batch_size, framework=framework + ) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..02c4b7b754b295016c23b114213d1dd0353363e1 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +import torch + +from transformers import ChineseCLIPConfig, ChineseCLIPModel + + +def copy_attn_layer(hf_attn_layer, pt_weights, prefix): + q_proj, k_proj, v_proj = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0) + q_proj_bias, k_proj_bias, v_proj_bias = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0) + + out_proj_weights = pt_weights[f"{prefix}.out_proj.weight"] + out_proj_bias = pt_weights[f"{prefix}.out_proj.bias"] + + hf_attn_layer.q_proj.weight.data = q_proj + hf_attn_layer.q_proj.bias.data = q_proj_bias + + hf_attn_layer.k_proj.weight.data = k_proj + hf_attn_layer.k_proj.bias.data = k_proj_bias + + hf_attn_layer.v_proj.weight.data = v_proj + hf_attn_layer.v_proj.bias.data = v_proj_bias + + hf_attn_layer.out_proj.weight.data = out_proj_weights + hf_attn_layer.out_proj.bias.data = out_proj_bias + + +def copy_mlp(hf_mlp, pt_weights, prefix): + copy_linear(hf_mlp.fc1, pt_weights, f"{prefix}.c_fc") + copy_linear(hf_mlp.fc2, pt_weights, f"{prefix}.c_proj") + + +def copy_linear(hf_linear, pt_weights, prefix): + hf_linear.weight.data = pt_weights[f"{prefix}.weight"].data + hf_linear.bias.data = pt_weights[f"{prefix}.bias"].data + + +def copy_layer(hf_layer, pt_weights, prefix): + # copy layer norms + copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1") + copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2") + + # copy MLP + copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp") + + # copy attn + copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn") + + +def copy_layers(hf_layers, pt_weights, prefix): + for layer_id, hf_layer in enumerate(hf_layers): + copy_layer(hf_layer, pt_weights, f"{prefix}.{layer_id}") + + +def copy_text_model_and_projection(hf_model, pt_weights): + # copy projection + hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T + + # copy text encoder + for name, param in hf_model.text_model.named_parameters(): + param.data = pt_weights[f"bert.{name}"].data + + +def copy_vision_model_and_projection(hf_model, pt_weights): + # copy projection + hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T + + # copy layer norms + copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, "visual.ln_pre") + copy_linear(hf_model.vision_model.post_layernorm, pt_weights, "visual.ln_post") + + # copy embeddings + hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data + hf_model.vision_model.embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data + hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data + + # copy encoder + copy_layers(hf_model.vision_model.encoder.layers, pt_weights, 
"visual.transformer.resblocks") + + +@torch.no_grad() +def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): + """ + Copy/paste/tweak model's weights to transformers design. + """ + + assert config_path is not None, "Please specify the ChineseCLIP model config of the corresponding model size." + config = ChineseCLIPConfig.from_pretrained(config_path) + + hf_model = ChineseCLIPModel(config).eval() + + pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"] + pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()} + + copy_text_model_and_projection(hf_model, pt_weights) + copy_vision_model_and_projection(hf_model, pt_weights) + hf_model.logit_scale.data = pt_weights["logit_scale"].data + + hf_model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output folder storing converted hf PyTorch model.", + ) + parser.add_argument( + "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint." + ) + parser.add_argument( + "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert." + ) + args = parser.parse_args() + + convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) + print("The conversion is finished!") diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..09aa4106b718ebf39c793b8325892670af566fe3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Chinese-CLIP.""" + +import warnings + +from ...utils import logging +from .image_processing_chinese_clip import ChineseCLIPImageProcessor + + +logger = logging.get_logger(__name__) + + +class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): + def __init__(self, *args, **kwargs) -> None: + warnings.warn( + "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." 
+ " Please use ChineseCLIPImageProcessor instead.", + FutureWarning, + ) + super().__init__(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..60f40272bf92716735f62371506202bf3fdd70cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py @@ -0,0 +1,331 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for Chinese-CLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + convert_to_rgb, + get_resize_output_image_size, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import TensorType, is_vision_available, logging + + +logger = logging.get_logger(__name__) + + +if is_vision_available(): + import PIL + + +class ChineseCLIPImageProcessor(BaseImageProcessor): + r""" + Constructs a Chinese-CLIP image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by + `do_resize` in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): + Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the + `preprocess` method. + crop_size (`Dict[str, int]` *optional*, defaults to 224): + Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` + method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in + the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. 
Can be overridden by `rescale_factor` in the `preprocess` + method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"shortest_edge": 224} + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} + crop_size = get_size_dict(crop_size) + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + self._valid_processor_keys = [ + "images", + "do_resize", + "size", + "resample", + "do_center_crop", + "crop_size", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "do_convert_rgb", + "return_tensors", + "data_format", + "input_data_format", + ] + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge + resized to keep the input aspect ratio. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. 
If not provided, it will be inferred from the input + image. + """ + size = get_size_dict(size, default_to_square=False) + output_size = get_resize_output_image_size( + image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format + ) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: int = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): + Whether to center crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size) + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + images = make_list_of_images(images) + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_center_crop=do_center_crop, + crop_size=crop_size, + do_resize=do_resize, + size=size, + resample=resample, + ) + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
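+ # infer_channel_dimension_format inspects which axis of the first array holds 1 or 3
+ # entries to decide between channels-first and channels-last; for ambiguous shapes
+ # (e.g. very small square images) pass input_data_format explicitly.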
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_center_crop: + images = [ + self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e97c20b24cd052e9d9f4815404cb114019a17f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -0,0 +1,1562 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
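The vendored image-processing module above ships without a usage note, so here is a minimal editorial sketch (not part of the diff) of how `ChineseCLIPImageProcessor` is typically driven. The checkpoint name matches `_CHECKPOINT_FOR_DOC` used in the modeling file below; the random image is a placeholder assumed for illustration.

```python
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

# Loads the processor config: shortest_edge=224 resize, 224x224 center crop,
# 1/255 rescale, OPENAI_CLIP mean/std normalization, RGB conversion.
processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

# Placeholder RGB image standing in for a real photo.
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])
```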
+""" PyTorch Chinese-CLIP model.""" + + +import math +from dataclasses import dataclass +from typing import Any, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + BaseModelOutputWithPoolingAndCrossAttentions, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" +_CONFIG_FOR_DOC = "ChineseCLIPConfig" + + +from ..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +# Copied from transformers.models.clip.modeling_clip.contrastive_loss +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +class ChineseCLIPOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + [`ChineseCLIPTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + [`ChineseCLIPVisionModel`]. + text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`ChineseCLIPTextModel`]. + vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`ChineseCLIPVisionModel`]. 
+ """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText +class ChineseCLIPTextEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with 
CLIP->ChineseCLIP +class ChineseCLIPVisionEmbeddings(nn.Module): + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText +class ChineseCLIPTextSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an 
encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
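+ # query_layer and key_layer have shape (batch, num_heads, seq_len, head_dim) after
+ # transpose_for_scores, so the matmul below yields raw scores of shape
+ # (batch, num_heads, query_len, key_len); the 1/sqrt(attention_head_size) scaling is
+ # applied further down, after any relative-position terms have been added.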
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText +class ChineseCLIPTextSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText +class ChineseCLIPTextAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = ChineseCLIPTextSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class ChineseCLIPVisionAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * 
self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." + ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText +class ChineseCLIPTextIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText +class ChineseCLIPTextOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision +class ChineseCLIPVisionMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText +class ChineseCLIPTextLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ChineseCLIPTextAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") + self.intermediate = 
ChineseCLIPTextIntermediate(config) + self.output = ChineseCLIPTextOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class ChineseCLIPVisionLayer(nn.Module): + def __init__(self, config: ChineseCLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = ChineseCLIPVisionAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = ChineseCLIPVisionMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText +class ChineseCLIPTextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class ChineseCLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ChineseCLIPConfig + base_model_prefix = "chinese_clip" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, ChineseCLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, ChineseCLIPTextEmbeddings): + nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) + nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) + nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) + for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: + if embedding.padding_idx is not None: + embedding.weight.data[embedding.padding_idx].zero_() + elif isinstance(module, ChineseCLIPVisionAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, ChineseCLIPVisionMLP): + factor = self.config.initializer_factor + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, ChineseCLIPModel): + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim**-0.5 * self.config.initializer_factor, + ) + 
nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, + ) + + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + + +CHINESE_CLIP_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + +CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CHINESE_CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText +class ChineseCLIPTextEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class ChineseCLIPVisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`ChineseCLIPVisionEncoderLayer`]. 
+ + Args: + config: ChineseCLIPConfig + """ + + def __init__(self, config: ChineseCLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class ChineseCLIPVisionTransformer(nn.Module): + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = ChineseCLIPVisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = ChineseCLIPVisionEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + """ + 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The text model from CHINESE_CLIP without any head or projection on top.", + CHINESE_CLIP_START_DOCSTRING, +) +class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + config_class = ChineseCLIPTextConfig + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = ChineseCLIPTextEmbeddings(config) + self.encoder = ChineseCLIPTextEncoder(config) + + self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
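A minimal editorial usage sketch for the text tower documented above; the input sentence is a placeholder. The tokenizer produces the `input_ids`, `attention_mask` and `token_type_ids` described in the inputs docstring.

```python
import torch
from transformers import AutoTokenizer, ChineseCLIPTextModel

tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
model = ChineseCLIPTextModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

inputs = tokenizer("杭州西湖的照片", return_tensors="pt")  # placeholder sentence
with torch.no_grad():
    outputs = model(**inputs)

last_hidden_state = outputs.last_hidden_state  # (batch, seq_len, hidden_size)
pooled_output = outputs.pooler_output          # (batch, hidden_size), tanh-pooled first token
```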
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """The vision model from CHINESE_CLIP without any head or projection on top.""", + CHINESE_CLIP_START_DOCSTRING, +) +class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): + config_class = ChineseCLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: ChineseCLIPVisionConfig): + super().__init__(config) + self.vision_model = ChineseCLIPVisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel + + >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + 
>>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) +class ChineseCLIPModel(ChineseCLIPPreTrainedModel): + config_class = ChineseCLIPConfig + + def __init__(self, config: ChineseCLIPConfig): + super().__init__(config) + + if not isinstance(config.text_config, ChineseCLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, ChineseCLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) + self.vision_model = ChineseCLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the final [CLS] hidden state of Text-Transformer. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[0][:, 0, :] + text_features = self.text_projection(pooled_output) + + return text_features + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the final [CLS] hidden state of Vision-Transformer. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> image_features = model.get_image_features(**inputs) + >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(pooled_output) + + return image_features + + @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ChineseCLIPOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, ChineseCLIPModel + + >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") + + >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
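The `forward` body that follows projects, L2-normalizes and temperature-scales both towers internally. The same similarities can be reproduced from the two feature helpers above; a minimal sketch (checkpoint, captions and image URL taken from the docstring examples earlier in this file):

```python
import torch
from PIL import Image
import requests
from transformers import AutoProcessor, ChineseCLIPModel

model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]

with torch.no_grad():
    image_embeds = model.get_image_features(**processor(images=image, return_tensors="pt"))
    text_embeds = model.get_text_features(**processor(text=texts, padding=True, return_tensors="pt"))

# L2-normalize, then scale the cosine similarities by the learned temperature,
# mirroring what forward() does before returning logits_per_image.
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
logits_per_image = model.logit_scale.exp() * image_embeds @ text_embeds.t()
probs = logits_per_image.softmax(dim=-1)  # probability of each caption for the image
```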
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[0][:, 0, :] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + loss = chinese_clip_loss(logits_per_text) + + if not return_dict: + # fix the None pooled_output of text_outputs to conform with dict_output + pooled_output = text_outputs[1] + if pooled_output is None: + text_outputs = (text_outputs[0],) + text_outputs[2:] + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return ChineseCLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..1f44fc50aed5763f6ac2eaaab7714c05170ad8c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py @@ -0,0 +1,141 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for Chinese-CLIP +""" + +import warnings + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class ChineseCLIPProcessor(ProcessorMixin): + r""" + Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a + single processor. 
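When `return_loss=True`, the loss applied to `logits_per_text` above is the standard symmetric contrastive (InfoNCE) objective used by CLIP-style dual encoders. A minimal sketch of that computation (illustrative only, not necessarily the exact `chinese_clip_loss` helper defined earlier in the modeling file):

```python
import torch
import torch.nn.functional as F


def symmetric_clip_loss(logits_per_text: torch.Tensor) -> torch.Tensor:
    # The i-th text in the batch matches the i-th image, so the targets are 0..N-1.
    targets = torch.arange(logits_per_text.size(0), device=logits_per_text.device)
    text_loss = F.cross_entropy(logits_per_text, targets)
    image_loss = F.cross_entropy(logits_per_text.t(), targets)
    return (text_loss + image_loss) / 2.0
```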
+ + [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`]. + See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information. + + Args: + image_processor ([`ChineseCLIPImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`BertTokenizerFast`], *optional*): + The tokenizer is a required input. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "ChineseCLIPImageProcessor" + tokenizer_class = ("BertTokenizer", "BertTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + feature_extractor = None + if "feature_extractor" in kwargs: + warnings.warn( + "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" + " instead.", + FutureWarning, + ) + feature_extractor = kwargs.pop("feature_extractor") + + image_processor = image_processor if image_processor is not None else feature_extractor + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + self.current_processor = self.image_processor + + def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + """ + Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text` + and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to + ChineseCLIPImageProcessor's [`~ChineseCLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring + of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + """ + + if text is None and images is None: + raise ValueError("You have to specify either text or images.
Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) + + if images is not None: + image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + @property + def feature_extractor_class(self): + warnings.warn( + "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", + FutureWarning, + ) + return self.image_processor_class diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b63983d156c0591aa5cdc2759d4efd66dca09180 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5084c4486008d143b040a93069c77624c5c5a734 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/__init__.py @@ -0,0 +1,111 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
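Since the `ChineseCLIPProcessor` above simply routes `text` to the tokenizer, `images` to the image processor, and merges the two encodings, a typical round trip looks like the following (a small usage sketch; the checkpoint name is the one used in the examples earlier in the diff):

```python
from PIL import Image
import requests
from transformers import ChineseCLIPModel, ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

# One call produces both modalities in a single BatchEncoding
batch = processor(text=["皮卡丘", "小火龙"], images=image, padding=True, return_tensors="pt")
print(sorted(batch.keys()))  # typically: attention_mask, input_ids, pixel_values, token_type_ids

outputs = model(**batch)
decoded = processor.batch_decode(batch["input_ids"], skip_special_tokens=True)  # back to text
```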
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_torch_available, +) + + +_import_structure = {"configuration_regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_regnet"] = [ + "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "RegNetForImageClassification", + "RegNetModel", + "RegNetPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_regnet"] = [ + "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRegNetForImageClassification", + "TFRegNetModel", + "TFRegNetPreTrainedModel", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_regnet"] = [ + "FlaxRegNetForImageClassification", + "FlaxRegNetModel", + "FlaxRegNetPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_regnet import ( + REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, + RegNetForImageClassification, + RegNetModel, + RegNetPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_regnet import ( + TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRegNetForImageClassification, + TFRegNetModel, + TFRegNetPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_regnet import ( + FlaxRegNetForImageClassification, + FlaxRegNetModel, + FlaxRegNetPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25d64d1b38496427ef00faaf7085241cc784f446 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d15d0b90e9eb1c8cbfc415031890bc5ef9316002 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_seer_10b_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_seer_10b_to_pytorch.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45ff49a52931eb6459199e954b55ea9103a38a1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_seer_10b_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d8ef1496d832b31ea6dce4041acae5157b2defe Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e422fedfc065f7211e54ca61fb84cba860ce49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_regnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_regnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a71ae4690ed96ce8fb23f387f3cf4aad55cfed0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_regnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_tf_regnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_tf_regnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cf8f1e560eab263fb5222b88a16dbc9986eddcc Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_tf_regnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/configuration_regnet.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/configuration_regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..629ac733917e3abc6889658ede6275eac3a9c663 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/configuration_regnet.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
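The RegNet `__init__.py` above wires every symbol through `_LazyModule`, so the torch/TF/Flax submodules are imported only when a symbol is first accessed, while `TYPE_CHECKING` keeps static analysis working. A stripped-down sketch of the same idea using a PEP 562 module `__getattr__` (illustrative only; `transformers`' real `_LazyModule` is more featureful, and the package/symbol names here are hypothetical):

```python
# lazy_pkg/__init__.py -- minimal illustration of the lazy-import pattern
import importlib
from typing import TYPE_CHECKING

_import_structure = {"configuration": ["DemoConfig"], "modeling": ["DemoModel"]}

if TYPE_CHECKING:  # static checkers and IDEs see the real symbols
    from .configuration import DemoConfig
    from .modeling import DemoModel
else:
    _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

    def __getattr__(name):  # PEP 562: invoked only on first attribute access
        if name not in _attr_to_module:
            raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
```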
+""" RegNet model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class RegNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the RegNet + [facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + embedding_size (`int`, *optional*, defaults to 64): + Dimensionality (hidden size) for the embedding layer. + hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`): + Dimensionality (hidden size) at each stage. + depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`): + Depth (number of layers) for each stage. + layer_type (`str`, *optional*, defaults to `"y"`): + The layer to use, it can be either `"x" or `"y"`. An `x` layer is a ResNet's BottleNeck layer with + `reduction` fixed to `1`. While a `y` layer is a `x` but with squeeze and excitation. Please refer to the + paper for a detailed explanation of how these layers were constructed. + hidden_act (`str`, *optional*, defaults to `"relu"`): + The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` + are supported. + downsample_in_first_stage (`bool`, *optional*, defaults to `False`): + If `True`, the first stage will downsample the inputs using a `stride` of 2. 
+ + Example: + ```python + >>> from transformers import RegNetConfig, RegNetModel + + >>> # Initializing a RegNet regnet-y-40 style configuration + >>> configuration = RegNetConfig() + >>> # Initializing a model from the regnet-y-40 style configuration + >>> model = RegNetModel(configuration) + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "regnet" + layer_types = ["x", "y"] + + def __init__( + self, + num_channels=3, + embedding_size=32, + hidden_sizes=[128, 192, 512, 1088], + depths=[2, 6, 12, 2], + groups_width=64, + layer_type="y", + hidden_act="relu", + **kwargs, + ): + super().__init__(**kwargs) + if layer_type not in self.layer_types: + raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") + self.num_channels = num_channels + self.embedding_size = embedding_size + self.hidden_sizes = hidden_sizes + self.depths = depths + self.groups_width = groups_width + self.layer_type = layer_type + self.hidden_act = hidden_act + # always downsample in the first stage + self.downsample_in_first_stage = True diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..93a516fb3c7747698fbb38d8ee2e4f85df77be30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py @@ -0,0 +1,304 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
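The `RegNetConfig` above exposes the width/depth/group parameters that the conversion scripts later in this diff fill in per checkpoint, and it rejects unknown layer types. A short sketch (the `x`-variant numbers match the `regnet-x-002` entry used further below):

```python
from transformers import RegNetConfig, RegNetModel

# RegNet-X style: bottleneck blocks without squeeze-and-excitation.
config = RegNetConfig(
    depths=[1, 1, 4, 7],
    hidden_sizes=[24, 56, 152, 368],
    groups_width=8,
    layer_type="x",
)
model = RegNetModel(config)

try:
    RegNetConfig(layer_type="z")  # not one of ["x", "y"]
except ValueError as err:
    print(err)  # layer_type=z is not one of x,y
```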
+"""Convert RegNet 10B checkpoints vissl.""" +# You need to install a specific version of classy vision +# pip install git+https://github.com/FrancescoSaverioZuppichini/ClassyVision.git@convert_weights + +import argparse +import json +import os +import re +from collections import OrderedDict +from dataclasses import dataclass, field +from functools import partial +from pathlib import Path +from pprint import pprint +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn +from classy_vision.models.regnet import RegNet, RegNetParams +from huggingface_hub import cached_download, hf_hub_url +from torch import Tensor +from vissl.models.model_helpers import get_trunk_forward_outputs + +from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger() + + +@dataclass +class Tracker: + module: nn.Module + traced: List[nn.Module] = field(default_factory=list) + handles: list = field(default_factory=list) + name2module: Dict[str, nn.Module] = field(default_factory=OrderedDict) + + def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str): + has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) + if has_not_submodules: + self.traced.append(m) + self.name2module[name] = m + + def __call__(self, x: Tensor): + for name, m in self.module.named_modules(): + self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name))) + self.module(x) + [x.remove() for x in self.handles] + return self + + @property + def parametrized(self): + # check the len of the state_dict keys to see if we have learnable params + return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0} + + +class FakeRegNetVisslWrapper(nn.Module): + """ + Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. + """ + + def __init__(self, model: nn.Module): + super().__init__() + + feature_blocks: List[Tuple[str, nn.Module]] = [] + # - get the stem + feature_blocks.append(("conv1", model.stem)) + # - get all the feature blocks + for k, v in model.trunk_output.named_children(): + assert k.startswith("block"), f"Unexpected layer name {k}" + block_index = len(feature_blocks) + 1 + feature_blocks.append((f"res{block_index}", v)) + + self._feature_blocks = nn.ModuleDict(feature_blocks) + + def forward(self, x: Tensor): + return get_trunk_forward_outputs( + x, + out_feat_keys=None, + feature_blocks=self._feature_blocks, + ) + + +class FakeRegNetParams(RegNetParams): + """ + Used to instantiace a RegNet model from classy vision with the same depth as the 10B one but with super small + parameters, so we can trace it in memory. 
+ """ + + def get_expanded_params(self): + return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)] + + +def get_from_to_our_keys(model_name: str) -> Dict[str, str]: + """ + Returns a dictionary that maps from original model's key -> our implementation's keys + """ + + # create our model (with small weights) + our_config = RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[8, 8, 8, 8], groups_width=8) + if "in1k" in model_name: + our_model = RegNetForImageClassification(our_config) + else: + our_model = RegNetModel(our_config) + # create from model (with small weights) + from_model = FakeRegNetVisslWrapper( + RegNet(FakeRegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) + ) + + with torch.no_grad(): + from_model = from_model.eval() + our_model = our_model.eval() + + x = torch.randn((1, 3, 32, 32)) + # trace both + dest_tracker = Tracker(our_model) + dest_traced = dest_tracker(x).parametrized + + pprint(dest_tracker.name2module) + src_tracker = Tracker(from_model) + src_traced = src_tracker(x).parametrized + + # convert the keys -> module dict to keys -> params + def to_params_dict(dict_with_modules): + params_dict = OrderedDict() + for name, module in dict_with_modules.items(): + for param_name, param in module.state_dict().items(): + params_dict[f"{name}.{param_name}"] = param + return params_dict + + from_to_ours_keys = {} + + src_state_dict = to_params_dict(src_traced) + dst_state_dict = to_params_dict(dest_traced) + + for (src_key, src_param), (dest_key, dest_param) in zip(src_state_dict.items(), dst_state_dict.items()): + from_to_ours_keys[src_key] = dest_key + logger.info(f"{src_key} -> {dest_key}") + # if "in1k" was in the model_name it means it must have a classification head (was finetuned) + if "in1k" in model_name: + from_to_ours_keys["0.clf.0.weight"] = "classifier.1.weight" + from_to_ours_keys["0.clf.0.bias"] = "classifier.1.bias" + + return from_to_ours_keys + + +def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): + filename = "imagenet-1k-id2label.json" + num_labels = 1000 + + repo_id = "huggingface/label-files" + num_labels = num_labels + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) + id2label = {int(k): v for k, v in id2label.items()} + + id2label = id2label + label2id = {v: k for k, v in id2label.items()} + + ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) + + names_to_config = { + "regnet-y-10b-seer": ImageNetPreTrainedConfig( + depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 + ), + # finetuned on imagenet + "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( + depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 + ), + } + + # add seer weights logic + def load_using_classy_vision(checkpoint_url: str) -> Tuple[Dict, Dict]: + files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu") + # check if we have a head, if yes add it + model_state_dict = files["classy_state_dict"]["base_model"]["model"] + return model_state_dict["trunk"], model_state_dict["heads"] + + names_to_from_model = { + "regnet-y-10b-seer": partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", + ), + "regnet-y-10b-seer-in1k": partial( + load_using_classy_vision, + 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", + ), + } + + from_to_ours_keys = get_from_to_our_keys(model_name) + + if not (save_directory / f"{model_name}.pth").exists(): + logger.info("Loading original state_dict.") + from_state_dict_trunk, from_state_dict_head = names_to_from_model[model_name]() + from_state_dict = from_state_dict_trunk + if "in1k" in model_name: + # add the head + from_state_dict = {**from_state_dict_trunk, **from_state_dict_head} + logger.info("Done!") + + converted_state_dict = {} + + not_used_keys = list(from_state_dict.keys()) + regex = r"\.block.-part." + # this is "interesting", so the original checkpoints have `block[0,1]-part` in each key name, we remove it + for key in from_state_dict.keys(): + # remove the weird "block[0,1]-part" from the key + src_key = re.sub(regex, "", key) + # now src_key from the model checkpoints is the one we got from the original model after tracing, so use it to get the correct destination key + dest_key = from_to_ours_keys[src_key] + # store the parameter with our key + converted_state_dict[dest_key] = from_state_dict[key] + not_used_keys.remove(key) + # check that all keys have been updated + assert len(not_used_keys) == 0, f"Some keys where not used {','.join(not_used_keys)}" + + logger.info(f"The following keys were not used: {','.join(not_used_keys)}") + + # save our state dict to disk + torch.save(converted_state_dict, save_directory / f"{model_name}.pth") + + del converted_state_dict + else: + logger.info("The state_dict was already stored on disk.") + if push_to_hub: + logger.info(f"Token is {os.environ['HF_TOKEN']}") + logger.info("Loading our model.") + # create our model + our_config = names_to_config[model_name] + our_model_func = RegNetModel + if "in1k" in model_name: + our_model_func = RegNetForImageClassification + our_model = our_model_func(our_config) + # place our model to the meta device (so remove all the weights) + our_model.to(torch.device("meta")) + logger.info("Loading state_dict in our model.") + # load state dict + state_dict_keys = our_model.state_dict().keys() + PreTrainedModel._load_pretrained_model_low_mem( + our_model, state_dict_keys, [save_directory / f"{model_name}.pth"] + ) + logger.info("Finally, pushing!") + # push it to hub + our_model.push_to_hub( + repo_path_or_name=save_directory / model_name, + commit_message="Add model", + output_dir=save_directory / model_name, + ) + size = 384 + # we can use the convnext one + image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size) + image_processor.push_to_hub( + repo_path_or_name=save_directory / model_name, + commit_message="Add image processor", + output_dir=save_directory / model_name, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default=None, + type=str, + help=( + "The name of the model you wish to convert, it must be one of the supported regnet* architecture," + " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." 
+ ), + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=Path, + required=True, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + default=True, + type=bool, + required=False, + help="If True, push model and image processor to the hub.", + ) + + args = parser.parse_args() + + pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path + pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) + convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..d29077c1a729ba14f242716a482504a107c087d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/convert_regnet_to_pytorch.py @@ -0,0 +1,459 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert RegNet checkpoints from timm and vissl.""" + + +import argparse +import json +from dataclasses import dataclass, field +from functools import partial +from pathlib import Path +from typing import Callable, Dict, List, Tuple + +import timm +import torch +import torch.nn as nn +from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf +from huggingface_hub import cached_download, hf_hub_url +from torch import Tensor +from vissl.models.model_helpers import get_trunk_forward_outputs + +from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger() + + +@dataclass +class Tracker: + module: nn.Module + traced: List[nn.Module] = field(default_factory=list) + handles: list = field(default_factory=list) + + def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): + has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) + if has_not_submodules: + self.traced.append(m) + + def __call__(self, x: Tensor): + for m in self.module.modules(): + self.handles.append(m.register_forward_hook(self._forward_hook)) + self.module(x) + [x.remove() for x in self.handles] + return self + + @property + def parametrized(self): + # check the len of the state_dict keys to see if we have learnable params + return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced)) + + +@dataclass +class ModuleTransfer: + src: nn.Module + dest: nn.Module + verbose: int = 1 + src_skip: List = field(default_factory=list) + dest_skip: List = field(default_factory=list) + raise_if_mismatch: bool = True + + def __call__(self, x: Tensor): + """ + Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. 
Under the + hood we tracked all the operations in both modules. + """ + dest_traced = Tracker(self.dest)(x).parametrized + src_traced = Tracker(self.src)(x).parametrized + + src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) + dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) + + if len(dest_traced) != len(src_traced) and self.raise_if_mismatch: + raise Exception( + f"Numbers of operations are different. Source module has {len(src_traced)} operations while" + f" destination module has {len(dest_traced)}." + ) + + for dest_m, src_m in zip(dest_traced, src_traced): + dest_m.load_state_dict(src_m.state_dict()) + if self.verbose == 1: + print(f"Transfered from={src_m} to={dest_m}") + + +class FakeRegNetVisslWrapper(nn.Module): + """ + Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. + """ + + def __init__(self, model: nn.Module): + super().__init__() + + feature_blocks: List[Tuple[str, nn.Module]] = [] + # - get the stem + feature_blocks.append(("conv1", model.stem)) + # - get all the feature blocks + for k, v in model.trunk_output.named_children(): + assert k.startswith("block"), f"Unexpected layer name {k}" + block_index = len(feature_blocks) + 1 + feature_blocks.append((f"res{block_index}", v)) + + self._feature_blocks = nn.ModuleDict(feature_blocks) + + def forward(self, x: Tensor): + return get_trunk_forward_outputs( + x, + out_feat_keys=None, + feature_blocks=self._feature_blocks, + ) + + +class NameToFromModelFuncMap(dict): + """ + A Dictionary with some additional logic to return a function that creates the correct original model. + """ + + def convert_name_to_timm(self, x: str) -> str: + x_split = x.split("-") + return x_split[0] + x_split[1] + "_" + "".join(x_split[2:]) + + def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]: + # default to timm! + if x not in self: + x = self.convert_name_to_timm(x) + val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None)) + + else: + val = super().__getitem__(x) + + return val + + +class NameToOurModelFuncMap(dict): + """ + A Dictionary with some additional logic to return the correct hugging face RegNet class reference. 
+ """ + + def __getitem__(self, x: str) -> Callable[[], nn.Module]: + if "seer" in x and "in1k" not in x: + val = RegNetModel + else: + val = RegNetForImageClassification + return val + + +def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]): + for from_key, to_key in keys: + to_state_dict[to_key] = from_state_dict[from_key].clone() + print(f"Copied key={from_key} to={to_key}") + return to_state_dict + + +def convert_weight_and_push( + name: str, + from_model_func: Callable[[], nn.Module], + our_model_func: Callable[[], nn.Module], + config: RegNetConfig, + save_directory: Path, + push_to_hub: bool = True, +): + print(f"Converting {name}...") + with torch.no_grad(): + from_model, from_state_dict = from_model_func() + our_model = our_model_func(config).eval() + module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False) + x = torch.randn((1, 3, 224, 224)) + module_transfer(x) + + if from_state_dict is not None: + keys = [] + # for seer - in1k finetuned we have to manually copy the head + if "seer" in name and "in1k" in name: + keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] + to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys) + our_model.load_state_dict(to_state_dict) + + our_outputs = our_model(x, output_hidden_states=True) + our_output = ( + our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state + ) + + from_output = from_model(x) + from_output = from_output[-1] if isinstance(from_output, list) else from_output + + # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state + if "seer" in name and "in1k" in name: + our_output = our_outputs.hidden_states[-1] + + assert torch.allclose(from_output, our_output), "The model logits don't match the original one." 
+ + if push_to_hub: + our_model.push_to_hub( + repo_path_or_name=save_directory / name, + commit_message="Add model", + use_temp_dir=True, + ) + + size = 224 if "seer" not in name else 384 + # we can use the convnext one + image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size) + image_processor.push_to_hub( + repo_path_or_name=save_directory / name, + commit_message="Add image processor", + use_temp_dir=True, + ) + + print(f"Pushed {name}") + + +def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): + filename = "imagenet-1k-id2label.json" + num_labels = 1000 + expected_shape = (1, num_labels) + + repo_id = "huggingface/label-files" + num_labels = num_labels + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) + id2label = {int(k): v for k, v in id2label.items()} + + id2label = id2label + label2id = {v: k for k, v in id2label.items()} + + ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) + + names_to_config = { + "regnet-x-002": ImageNetPreTrainedConfig( + depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x" + ), + "regnet-x-004": ImageNetPreTrainedConfig( + depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x" + ), + "regnet-x-006": ImageNetPreTrainedConfig( + depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x" + ), + "regnet-x-008": ImageNetPreTrainedConfig( + depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x" + ), + "regnet-x-016": ImageNetPreTrainedConfig( + depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x" + ), + "regnet-x-032": ImageNetPreTrainedConfig( + depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x" + ), + "regnet-x-040": ImageNetPreTrainedConfig( + depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x" + ), + "regnet-x-064": ImageNetPreTrainedConfig( + depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x" + ), + "regnet-x-080": ImageNetPreTrainedConfig( + depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x" + ), + "regnet-x-120": ImageNetPreTrainedConfig( + depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x" + ), + "regnet-x-160": ImageNetPreTrainedConfig( + depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x" + ), + "regnet-x-320": ImageNetPreTrainedConfig( + depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x" + ), + # y variant + "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8), + "regnet-y-004": ImageNetPreTrainedConfig( + depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 + ), + "regnet-y-006": ImageNetPreTrainedConfig( + depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 + ), + "regnet-y-008": ImageNetPreTrainedConfig( + depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 + ), + "regnet-y-016": ImageNetPreTrainedConfig( + depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 + ), + "regnet-y-032": ImageNetPreTrainedConfig( + depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 + ), + 
"regnet-y-040": ImageNetPreTrainedConfig( + depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 + ), + "regnet-y-064": ImageNetPreTrainedConfig( + depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 + ), + "regnet-y-080": ImageNetPreTrainedConfig( + depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 + ), + "regnet-y-120": ImageNetPreTrainedConfig( + depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 + ), + "regnet-y-160": ImageNetPreTrainedConfig( + depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 + ), + "regnet-y-320": ImageNetPreTrainedConfig( + depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 + ), + # models created by SEER -> https://arxiv.org/abs/2202.08360 + "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232), + "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328), + "regnet-y-1280-seer": RegNetConfig( + depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 + ), + "regnet-y-2560-seer": RegNetConfig( + depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 + ), + "regnet-y-10b-seer": ImageNetPreTrainedConfig( + depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 + ), + # finetuned on imagenet + "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( + depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 + ), + "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( + depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 + ), + "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( + depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 + ), + "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( + depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 + ), + "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( + depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 + ), + } + + names_to_ours_model_map = NameToOurModelFuncMap() + names_to_from_model_map = NameToFromModelFuncMap() + # add seer weights logic + + def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]: + files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu") + model = model_func() + # check if we have a head, if yes add it + model_state_dict = files["classy_state_dict"]["base_model"]["model"] + state_dict = model_state_dict["trunk"] + model.load_state_dict(state_dict) + return model.eval(), model_state_dict["heads"] + + # pretrained + names_to_from_model_map["regnet-y-320-seer"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", + lambda: FakeRegNetVisslWrapper(RegNetY32gf()), + ) + + names_to_from_model_map["regnet-y-640-seer"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", + lambda: FakeRegNetVisslWrapper(RegNetY64gf()), + ) + + names_to_from_model_map["regnet-y-1280-seer"] = partial( + load_using_classy_vision, + 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", + lambda: FakeRegNetVisslWrapper(RegNetY128gf()), + ) + + names_to_from_model_map["regnet-y-10b-seer"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", + lambda: FakeRegNetVisslWrapper( + RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) + ), + ) + + # IN1K finetuned + names_to_from_model_map["regnet-y-320-seer-in1k"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", + lambda: FakeRegNetVisslWrapper(RegNetY32gf()), + ) + + names_to_from_model_map["regnet-y-640-seer-in1k"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", + lambda: FakeRegNetVisslWrapper(RegNetY64gf()), + ) + + names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", + lambda: FakeRegNetVisslWrapper(RegNetY128gf()), + ) + + names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial( + load_using_classy_vision, + "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", + lambda: FakeRegNetVisslWrapper( + RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) + ), + ) + + if model_name: + convert_weight_and_push( + model_name, + names_to_from_model_map[model_name], + names_to_ours_model_map[model_name], + names_to_config[model_name], + save_directory, + push_to_hub, + ) + else: + for model_name, config in names_to_config.items(): + convert_weight_and_push( + model_name, + names_to_from_model_map[model_name], + names_to_ours_model_map[model_name], + config, + save_directory, + push_to_hub, + ) + return config, expected_shape + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default=None, + type=str, + help=( + "The name of the model you wish to convert, it must be one of the supported regnet* architecture," + " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." 
+ ), + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=Path, + required=True, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + default=True, + type=bool, + required=False, + help="If True, push model and image processor to the hub.", + ) + + args = parser.parse_args() + + pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path + pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) + convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_flax_regnet.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_flax_regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..fc4258257bdb192bae7a7e564fd65de1e3003210 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_flax_regnet.py @@ -0,0 +1,819 @@ +# coding=utf-8 +# Copyright 2023 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from functools import partial +from typing import Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.traverse_util import flatten_dict, unflatten_dict + +from transformers import RegNetConfig +from transformers.modeling_flax_outputs import ( + FlaxBaseModelOutputWithNoAttention, + FlaxBaseModelOutputWithPooling, + FlaxBaseModelOutputWithPoolingAndNoAttention, + FlaxImageClassifierOutputWithNoAttention, +) +from transformers.modeling_flax_utils import ( + ACT2FN, + FlaxPreTrainedModel, + append_replace_return_docstrings, + overwrite_call_docstring, +) +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, +) + + +REGNET_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading, saving and converting weights from PyTorch models) + + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +REGNET_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`RegNetImageProcessor.__call__`] for details. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.resnet.modeling_flax_resnet.Identity +class Identity(nn.Module): + """Identity function.""" + + @nn.compact + def __call__(self, x, **kwargs): + return x + + +class FlaxRegNetConvLayer(nn.Module): + out_channels: int + kernel_size: int = 3 + stride: int = 1 + groups: int = 1 + activation: Optional[str] = "relu" + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.convolution = nn.Conv( + self.out_channels, + kernel_size=(self.kernel_size, self.kernel_size), + strides=self.stride, + padding=self.kernel_size // 2, + feature_group_count=self.groups, + use_bias=False, + kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), + dtype=self.dtype, + ) + self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) + self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity() + + def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + hidden_state = self.convolution(hidden_state) + hidden_state = self.normalization(hidden_state, use_running_average=deterministic) + hidden_state = self.activation_func(hidden_state) + return hidden_state + + +class FlaxRegNetEmbeddings(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embedder = FlaxRegNetConvLayer( + self.config.embedding_size, + kernel_size=3, + stride=2, + activation=self.config.hidden_act, + dtype=self.dtype, + ) + + def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + num_channels = pixel_values.shape[-1] + if num_channels != self.config.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + hidden_state = self.embedder(pixel_values, deterministic=deterministic) + return hidden_state + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetShortCut with ResNet->RegNet +class FlaxRegNetShortCut(nn.Module): + """ + RegNet shortcut, used to project the residual features to the correct size. 
If needed, it is also used to + downsample the input using `stride=2`. + """ + + out_channels: int + stride: int = 2 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.convolution = nn.Conv( + self.out_channels, + kernel_size=(1, 1), + strides=self.stride, + use_bias=False, + kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), + dtype=self.dtype, + ) + self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) + + def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + hidden_state = self.convolution(x) + hidden_state = self.normalization(hidden_state, use_running_average=deterministic) + return hidden_state + + +class FlaxRegNetSELayerCollection(nn.Module): + in_channels: int + reduced_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.conv_1 = nn.Conv( + self.reduced_channels, + kernel_size=(1, 1), + kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), + dtype=self.dtype, + name="0", + ) # 0 is the name used in corresponding pytorch implementation + self.conv_2 = nn.Conv( + self.in_channels, + kernel_size=(1, 1), + kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), + dtype=self.dtype, + name="2", + ) # 2 is the name used in corresponding pytorch implementation + + def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: + hidden_state = self.conv_1(hidden_state) + hidden_state = nn.relu(hidden_state) + hidden_state = self.conv_2(hidden_state) + attention = nn.sigmoid(hidden_state) + + return attention + + +class FlaxRegNetSELayer(nn.Module): + """ + Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). + """ + + in_channels: int + reduced_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.pooler = partial(nn.avg_pool, padding=((0, 0), (0, 0))) + self.attention = FlaxRegNetSELayerCollection(self.in_channels, self.reduced_channels, dtype=self.dtype) + + def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: + pooled = self.pooler( + hidden_state, + window_shape=(hidden_state.shape[1], hidden_state.shape[2]), + strides=(hidden_state.shape[1], hidden_state.shape[2]), + ) + attention = self.attention(pooled) + hidden_state = hidden_state * attention + return hidden_state + + +class FlaxRegNetXLayerCollection(nn.Module): + config: RegNetConfig + out_channels: int + stride: int = 1 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + groups = max(1, self.out_channels // self.config.groups_width) + + self.layer = [ + FlaxRegNetConvLayer( + self.out_channels, + kernel_size=1, + activation=self.config.hidden_act, + dtype=self.dtype, + name="0", + ), + FlaxRegNetConvLayer( + self.out_channels, + stride=self.stride, + groups=groups, + activation=self.config.hidden_act, + dtype=self.dtype, + name="1", + ), + FlaxRegNetConvLayer( + self.out_channels, + kernel_size=1, + activation=None, + dtype=self.dtype, + name="2", + ), + ] + + def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + for layer in self.layer: + hidden_state = layer(hidden_state, deterministic=deterministic) + return hidden_state + + +class FlaxRegNetXLayer(nn.Module): + """ + RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. 
+ """ + + config: RegNetConfig + in_channels: int + out_channels: int + stride: int = 1 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 + self.shortcut = ( + FlaxRegNetShortCut( + self.out_channels, + stride=self.stride, + dtype=self.dtype, + ) + if should_apply_shortcut + else Identity() + ) + self.layer = FlaxRegNetXLayerCollection( + self.config, + in_channels=self.in_channels, + out_channels=self.out_channels, + stride=self.stride, + dtype=self.dtype, + ) + self.activation_func = ACT2FN[self.config.hidden_act] + + def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + residual = hidden_state + hidden_state = self.layer(hidden_state) + residual = self.shortcut(residual, deterministic=deterministic) + hidden_state += residual + hidden_state = self.activation_func(hidden_state) + return hidden_state + + +class FlaxRegNetYLayerCollection(nn.Module): + config: RegNetConfig + in_channels: int + out_channels: int + stride: int = 1 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + groups = max(1, self.out_channels // self.config.groups_width) + + self.layer = [ + FlaxRegNetConvLayer( + self.out_channels, + kernel_size=1, + activation=self.config.hidden_act, + dtype=self.dtype, + name="0", + ), + FlaxRegNetConvLayer( + self.out_channels, + stride=self.stride, + groups=groups, + activation=self.config.hidden_act, + dtype=self.dtype, + name="1", + ), + FlaxRegNetSELayer( + self.out_channels, + reduced_channels=int(round(self.in_channels / 4)), + dtype=self.dtype, + name="2", + ), + FlaxRegNetConvLayer( + self.out_channels, + kernel_size=1, + activation=None, + dtype=self.dtype, + name="3", + ), + ] + + def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: + for layer in self.layer: + hidden_state = layer(hidden_state) + return hidden_state + + +class FlaxRegNetYLayer(nn.Module): + """ + RegNet's Y layer: an X layer with Squeeze and Excitation. + """ + + config: RegNetConfig + in_channels: int + out_channels: int + stride: int = 1 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 + + self.shortcut = ( + FlaxRegNetShortCut( + self.out_channels, + stride=self.stride, + dtype=self.dtype, + ) + if should_apply_shortcut + else Identity() + ) + self.layer = FlaxRegNetYLayerCollection( + self.config, + in_channels=self.in_channels, + out_channels=self.out_channels, + stride=self.stride, + dtype=self.dtype, + ) + self.activation_func = ACT2FN[self.config.hidden_act] + + def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + residual = hidden_state + hidden_state = self.layer(hidden_state) + residual = self.shortcut(residual, deterministic=deterministic) + hidden_state += residual + hidden_state = self.activation_func(hidden_state) + return hidden_state + + +class FlaxRegNetStageLayersCollection(nn.Module): + """ + A RegNet stage composed by stacked layers. 
+ """ + + config: RegNetConfig + in_channels: int + out_channels: int + stride: int = 2 + depth: int = 2 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + layer = FlaxRegNetXLayer if self.config.layer_type == "x" else FlaxRegNetYLayer + + layers = [ + # downsampling is done in the first layer with stride of 2 + layer( + self.config, + self.in_channels, + self.out_channels, + stride=self.stride, + dtype=self.dtype, + name="0", + ) + ] + + for i in range(self.depth - 1): + layers.append( + layer( + self.config, + self.out_channels, + self.out_channels, + dtype=self.dtype, + name=str(i + 1), + ) + ) + + self.layers = layers + + def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + hidden_state = x + for layer in self.layers: + hidden_state = layer(hidden_state, deterministic=deterministic) + return hidden_state + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetStage with ResNet->RegNet +class FlaxRegNetStage(nn.Module): + """ + A RegNet stage composed by stacked layers. + """ + + config: RegNetConfig + in_channels: int + out_channels: int + stride: int = 2 + depth: int = 2 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.layers = FlaxRegNetStageLayersCollection( + self.config, + in_channels=self.in_channels, + out_channels=self.out_channels, + stride=self.stride, + depth=self.depth, + dtype=self.dtype, + ) + + def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: + return self.layers(x, deterministic=deterministic) + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetStageCollection with ResNet->RegNet +class FlaxRegNetStageCollection(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + in_out_channels = zip(self.config.hidden_sizes, self.config.hidden_sizes[1:]) + stages = [ + FlaxRegNetStage( + self.config, + self.config.embedding_size, + self.config.hidden_sizes[0], + stride=2 if self.config.downsample_in_first_stage else 1, + depth=self.config.depths[0], + dtype=self.dtype, + name="0", + ) + ] + + for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, self.config.depths[1:])): + stages.append( + FlaxRegNetStage(self.config, in_channels, out_channels, depth=depth, dtype=self.dtype, name=str(i + 1)) + ) + + self.stages = stages + + def __call__( + self, + hidden_state: jnp.ndarray, + output_hidden_states: bool = False, + deterministic: bool = True, + ) -> FlaxBaseModelOutputWithNoAttention: + hidden_states = () if output_hidden_states else None + + for stage_module in self.stages: + if output_hidden_states: + hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) + + hidden_state = stage_module(hidden_state, deterministic=deterministic) + + return hidden_state, hidden_states + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetEncoder with ResNet->RegNet +class FlaxRegNetEncoder(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.stages = FlaxRegNetStageCollection(self.config, dtype=self.dtype) + + def __call__( + self, + hidden_state: jnp.ndarray, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ) -> FlaxBaseModelOutputWithNoAttention: + hidden_state, hidden_states = self.stages( + hidden_state, output_hidden_states=output_hidden_states, deterministic=deterministic + ) + + if output_hidden_states: + hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) + + if not 
return_dict: + return tuple(v for v in [hidden_state, hidden_states] if v is not None) + + return FlaxBaseModelOutputWithNoAttention( + last_hidden_state=hidden_state, + hidden_states=hidden_states, + ) + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetPreTrainedModel with ResNet->RegNet,resnet->regnet,RESNET->REGNET +class FlaxRegNetPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RegNetConfig + base_model_prefix = "regnet" + main_input_name = "pixel_values" + module_class: nn.Module = None + + def __init__( + self, + config: RegNetConfig, + input_shape=(1, 224, 224, 3), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + if input_shape is None: + input_shape = (1, config.image_size, config.image_size, config.num_channels) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + pixel_values = jnp.zeros(input_shape, dtype=self.dtype) + + rngs = {"params": rng} + + random_params = self.module.init(rngs, pixel_values, return_dict=False) + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) + def __call__( + self, + pixel_values, + params: dict = None, + train: bool = False, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # Handle any PRNG if needed + rngs = {} + + return self.module.apply( + { + "params": params["params"] if params is not None else self.params["params"], + "batch_stats": params["batch_stats"] if params is not None else self.params["batch_stats"], + }, + jnp.array(pixel_values, dtype=jnp.float32), + not train, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=["batch_stats"] if train else False, # Returing tuple with batch_stats only when train is True + ) + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetModule with ResNet->RegNet +class FlaxRegNetModule(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.embedder = FlaxRegNetEmbeddings(self.config, dtype=self.dtype) + self.encoder = FlaxRegNetEncoder(self.config, dtype=self.dtype) + + # Adaptive average pooling used in resnet + self.pooler = partial( + nn.avg_pool, + padding=((0, 0), (0, 0)), + ) + + def __call__( + self, + pixel_values, + deterministic: bool = True, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> FlaxBaseModelOutputWithPoolingAndNoAttention: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + 
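# Layout note on the code that follows: the embedder, encoder and pooler all run in NHWC,
# Flax's native convolution layout. The pooler is a global average pool built from
# `nn.avg_pool` with a window equal to the full spatial extent of `last_hidden_state`,
# and both the pooled output and the last hidden state are then transposed with
# `(0, 3, 1, 2)` so the returned tensors are NCHW, matching the PyTorch RegNet model.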
return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + embedding_output = self.embedder(pixel_values, deterministic=deterministic) + + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + last_hidden_state = encoder_outputs[0] + + pooled_output = self.pooler( + last_hidden_state, + window_shape=(last_hidden_state.shape[1], last_hidden_state.shape[2]), + strides=(last_hidden_state.shape[1], last_hidden_state.shape[2]), + ).transpose(0, 3, 1, 2) + + last_hidden_state = last_hidden_state.transpose(0, 3, 1, 2) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return FlaxBaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings( + "The bare RegNet model outputting raw features without any specific head on top.", + REGNET_START_DOCSTRING, +) +class FlaxRegNetModel(FlaxRegNetPreTrainedModel): + module_class = FlaxRegNetModule + + +FLAX_VISION_MODEL_DOCSTRING = """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, FlaxRegNetModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040") + >>> model = FlaxRegNetModel.from_pretrained("facebook/regnet-y-040") + + >>> inputs = image_processor(images=image, return_tensors="np") + >>> outputs = model(**inputs) + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +overwrite_call_docstring(FlaxRegNetModel, FLAX_VISION_MODEL_DOCSTRING) +append_replace_return_docstrings( + FlaxRegNetModel, + output_type=FlaxBaseModelOutputWithPooling, + config_class=RegNetConfig, +) + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetClassifierCollection with ResNet->RegNet +class FlaxRegNetClassifierCollection(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, name="1") + + def __call__(self, x: jnp.ndarray) -> jnp.ndarray: + return self.classifier(x) + + +# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetForImageClassificationModule with ResNet->RegNet,resnet->regnet,RESNET->REGNET +class FlaxRegNetForImageClassificationModule(nn.Module): + config: RegNetConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.regnet = FlaxRegNetModule(config=self.config, dtype=self.dtype) + + if self.config.num_labels > 0: + self.classifier = FlaxRegNetClassifierCollection(self.config, dtype=self.dtype) + else: + self.classifier = Identity() + + def __call__( + self, + pixel_values=None, + deterministic: bool = True, + output_hidden_states=None, + return_dict=None, + ): + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.regnet( + pixel_values, + deterministic=deterministic, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(pooled_output[:, :, 0, 0]) + + if not return_dict: + output = (logits,) + outputs[2:] + return output + + return 
FlaxImageClassifierOutputWithNoAttention(logits=logits, hidden_states=outputs.hidden_states) + + +@add_start_docstrings( + """ + RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. + """, + REGNET_START_DOCSTRING, +) +class FlaxRegNetForImageClassification(FlaxRegNetPreTrainedModel): + module_class = FlaxRegNetForImageClassificationModule + + +FLAX_VISION_CLASSIF_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> from transformers import AutoImageProcessor, FlaxRegNetForImageClassification + >>> from PIL import Image + >>> import jax + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040") + >>> model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") + + >>> inputs = image_processor(images=image, return_tensors="np") + >>> outputs = model(**inputs) + >>> logits = outputs.logits + + >>> # model predicts one of the 1000 ImageNet classes + >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1) + >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()]) + ``` +""" + +overwrite_call_docstring(FlaxRegNetForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING) +append_replace_return_docstrings( + FlaxRegNetForImageClassification, + output_type=FlaxImageClassifierOutputWithNoAttention, + config_class=RegNetConfig, +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_regnet.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..915e4cbae46bee53c101768bc4d0969a212fdc63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_regnet.py @@ -0,0 +1,445 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
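# A minimal usage sketch for the PyTorch classes defined in this file, mirroring the Flax
# docstring examples above and the `facebook/regnet-y-040` checkpoint that the code-sample
# docstrings below reference. It is only an illustration of how the model is typically called,
# not part of the module itself.
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, RegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"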
+""" PyTorch RegNet model.""" + +from typing import Optional + +import torch +import torch.utils.checkpoint +from torch import Tensor, nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import ( + BaseModelOutputWithNoAttention, + BaseModelOutputWithPoolingAndNoAttention, + ImageClassifierOutputWithNoAttention, +) +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_regnet import RegNetConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "RegNetConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040" +_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class RegNetConvLayer(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + stride: int = 1, + groups: int = 1, + activation: Optional[str] = "relu", + ): + super().__init__() + self.convolution = nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=groups, + bias=False, + ) + self.normalization = nn.BatchNorm2d(out_channels) + self.activation = ACT2FN[activation] if activation is not None else nn.Identity() + + def forward(self, hidden_state): + hidden_state = self.convolution(hidden_state) + hidden_state = self.normalization(hidden_state) + hidden_state = self.activation(hidden_state) + return hidden_state + + +class RegNetEmbeddings(nn.Module): + """ + RegNet Embedddings (stem) composed of a single aggressive convolution. + """ + + def __init__(self, config: RegNetConfig): + super().__init__() + self.embedder = RegNetConvLayer( + config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act + ) + self.num_channels = config.num_channels + + def forward(self, pixel_values): + num_channels = pixel_values.shape[1] + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + hidden_state = self.embedder(pixel_values) + return hidden_state + + +# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut with ResNet->RegNet +class RegNetShortCut(nn.Module): + """ + RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to + downsample the input using `stride=2`. + """ + + def __init__(self, in_channels: int, out_channels: int, stride: int = 2): + super().__init__() + self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False) + self.normalization = nn.BatchNorm2d(out_channels) + + def forward(self, input: Tensor) -> Tensor: + hidden_state = self.convolution(input) + hidden_state = self.normalization(hidden_state) + return hidden_state + + +class RegNetSELayer(nn.Module): + """ + Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). 
+ """ + + def __init__(self, in_channels: int, reduced_channels: int): + super().__init__() + + self.pooler = nn.AdaptiveAvgPool2d((1, 1)) + self.attention = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, kernel_size=1), + nn.ReLU(), + nn.Conv2d(reduced_channels, in_channels, kernel_size=1), + nn.Sigmoid(), + ) + + def forward(self, hidden_state): + # b c h w -> b c 1 1 + pooled = self.pooler(hidden_state) + attention = self.attention(pooled) + hidden_state = hidden_state * attention + return hidden_state + + +class RegNetXLayer(nn.Module): + """ + RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. + """ + + def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): + super().__init__() + should_apply_shortcut = in_channels != out_channels or stride != 1 + groups = max(1, out_channels // config.groups_width) + self.shortcut = ( + RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() + ) + self.layer = nn.Sequential( + RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), + RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), + RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), + ) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, hidden_state): + residual = hidden_state + hidden_state = self.layer(hidden_state) + residual = self.shortcut(residual) + hidden_state += residual + hidden_state = self.activation(hidden_state) + return hidden_state + + +class RegNetYLayer(nn.Module): + """ + RegNet's Y layer: an X layer with Squeeze and Excitation. + """ + + def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): + super().__init__() + should_apply_shortcut = in_channels != out_channels or stride != 1 + groups = max(1, out_channels // config.groups_width) + self.shortcut = ( + RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() + ) + self.layer = nn.Sequential( + RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), + RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), + RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), + RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), + ) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, hidden_state): + residual = hidden_state + hidden_state = self.layer(hidden_state) + residual = self.shortcut(residual) + hidden_state += residual + hidden_state = self.activation(hidden_state) + return hidden_state + + +class RegNetStage(nn.Module): + """ + A RegNet stage composed by stacked layers. 
+ """ + + def __init__( + self, + config: RegNetConfig, + in_channels: int, + out_channels: int, + stride: int = 2, + depth: int = 2, + ): + super().__init__() + + layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer + + self.layers = nn.Sequential( + # downsampling is done in the first layer with stride of 2 + layer( + config, + in_channels, + out_channels, + stride=stride, + ), + *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], + ) + + def forward(self, hidden_state): + hidden_state = self.layers(hidden_state) + return hidden_state + + +class RegNetEncoder(nn.Module): + def __init__(self, config: RegNetConfig): + super().__init__() + self.stages = nn.ModuleList([]) + # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input + self.stages.append( + RegNetStage( + config, + config.embedding_size, + config.hidden_sizes[0], + stride=2 if config.downsample_in_first_stage else 1, + depth=config.depths[0], + ) + ) + in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) + for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]): + self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth)) + + def forward( + self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True + ) -> BaseModelOutputWithNoAttention: + hidden_states = () if output_hidden_states else None + + for stage_module in self.stages: + if output_hidden_states: + hidden_states = hidden_states + (hidden_state,) + + hidden_state = stage_module(hidden_state) + + if output_hidden_states: + hidden_states = hidden_states + (hidden_state,) + + if not return_dict: + return tuple(v for v in [hidden_state, hidden_states] if v is not None) + + return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) + + +class RegNetPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RegNetConfig + base_model_prefix = "regnet" + main_input_name = "pixel_values" + + # Copied from transformers.models.resnet.modeling_resnet.ResNetPreTrainedModel._init_weights + def _init_weights(self, module): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(module.weight, 1) + nn.init.constant_(module.bias, 0) + + +REGNET_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and + behavior. + + Parameters: + config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +REGNET_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`ConvNextImageProcessor.__call__`] for details. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare RegNet model outputting raw features without any specific head on top.", + REGNET_START_DOCSTRING, +) +# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet +class RegNetModel(RegNetPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + self.embedder = RegNetEmbeddings(config) + self.encoder = RegNetEncoder(config) + self.pooler = nn.AdaptiveAvgPool2d((1, 1)) + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndNoAttention, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None + ) -> BaseModelOutputWithPoolingAndNoAttention: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + embedding_output = self.embedder(pixel_values) + + encoder_outputs = self.encoder( + embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict + ) + + last_hidden_state = encoder_outputs[0] + + pooled_output = self.pooler(last_hidden_state) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings( + """ + RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. + """, + REGNET_START_DOCSTRING, +) +# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet +class RegNetForImageClassification(RegNetPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.regnet = RegNetModel(config) + # classification head + self.classifier = nn.Sequential( + nn.Flatten(), + nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), + ) + # initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutputWithNoAttention, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> ImageClassifierOutputWithNoAttention: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(pooled_output) + + loss = None + + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return (loss,) + output if loss is not None else output + + return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) diff --git a/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_tf_regnet.py b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_tf_regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c296027fc6c3fde52150b481c864b98a4d6089 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/regnet/modeling_tf_regnet.py @@ -0,0 +1,611 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" TensorFlow RegNet model.""" + +from typing import Optional, Tuple, Union + +import tensorflow as tf + +from ...activations_tf import ACT2FN +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithNoAttention, + TFBaseModelOutputWithPoolingAndNoAttention, + TFSequenceClassifierOutput, +) +from ...modeling_tf_utils import ( + TFPreTrainedModel, + TFSequenceClassificationLoss, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list +from ...utils import logging +from .configuration_regnet import RegNetConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "RegNetConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040" +_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class TFRegNetConvLayer(keras.layers.Layer): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + stride: int = 1, + groups: int = 1, + activation: Optional[str] = "relu", + **kwargs, + ): + super().__init__(**kwargs) + # The padding and conv has been verified in + # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb + self.padding = keras.layers.ZeroPadding2D(padding=kernel_size // 2) + self.convolution = keras.layers.Conv2D( + filters=out_channels, + kernel_size=kernel_size, + strides=stride, + padding="VALID", + groups=groups, + use_bias=False, + name="convolution", + ) + self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") + self.activation = ACT2FN[activation] if activation is not None else tf.identity + self.in_channels = in_channels + self.out_channels = out_channels + + def call(self, hidden_state): + hidden_state = self.convolution(self.padding(hidden_state)) + hidden_state = self.normalization(hidden_state) + hidden_state = self.activation(hidden_state) + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convolution", None) is not None: + with tf.name_scope(self.convolution.name): + self.convolution.build([None, None, None, self.in_channels]) + if getattr(self, "normalization", None) is not None: + with tf.name_scope(self.normalization.name): + self.normalization.build([None, None, None, self.out_channels]) + + +class TFRegNetEmbeddings(keras.layers.Layer): + """ + RegNet Embeddings (stem) composed of a single aggressive convolution. + """ + + def __init__(self, config: RegNetConfig, **kwargs): + super().__init__(**kwargs) + self.num_channels = config.num_channels + self.embedder = TFRegNetConvLayer( + in_channels=config.num_channels, + out_channels=config.embedding_size, + kernel_size=3, + stride=2, + activation=config.hidden_act, + name="embedder", + ) + + def call(self, pixel_values): + num_channels = shape_list(pixel_values)[1] + if tf.executing_eagerly() and num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + + # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. 
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels) + pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) + hidden_state = self.embedder(pixel_values) + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embedder", None) is not None: + with tf.name_scope(self.embedder.name): + self.embedder.build(None) + + +class TFRegNetShortCut(keras.layers.Layer): + """ + RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to + downsample the input using `stride=2`. + """ + + def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs): + super().__init__(**kwargs) + self.convolution = keras.layers.Conv2D( + filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" + ) + self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") + self.in_channels = in_channels + self.out_channels = out_channels + + def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: + return self.normalization(self.convolution(inputs), training=training) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convolution", None) is not None: + with tf.name_scope(self.convolution.name): + self.convolution.build([None, None, None, self.in_channels]) + if getattr(self, "normalization", None) is not None: + with tf.name_scope(self.normalization.name): + self.normalization.build([None, None, None, self.out_channels]) + + +class TFRegNetSELayer(keras.layers.Layer): + """ + Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). + """ + + def __init__(self, in_channels: int, reduced_channels: int, **kwargs): + super().__init__(**kwargs) + self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") + self.attention = [ + keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"), + keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"), + ] + self.in_channels = in_channels + self.reduced_channels = reduced_channels + + def call(self, hidden_state): + # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] + pooled = self.pooler(hidden_state) + for layer_module in self.attention: + pooled = layer_module(pooled) + hidden_state = hidden_state * pooled + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "pooler", None) is not None: + with tf.name_scope(self.pooler.name): + self.pooler.build((None, None, None, None)) + if getattr(self, "attention", None) is not None: + with tf.name_scope(self.attention[0].name): + self.attention[0].build([None, None, None, self.in_channels]) + with tf.name_scope(self.attention[1].name): + self.attention[1].build([None, None, None, self.reduced_channels]) + + +class TFRegNetXLayer(keras.layers.Layer): + """ + RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. 
+ """ + + def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): + super().__init__(**kwargs) + should_apply_shortcut = in_channels != out_channels or stride != 1 + groups = max(1, out_channels // config.groups_width) + self.shortcut = ( + TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") + if should_apply_shortcut + else keras.layers.Activation("linear", name="shortcut") + ) + # `self.layers` instead of `self.layer` because that is a reserved argument. + self.layers = [ + TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), + TFRegNetConvLayer( + out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" + ), + TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.2"), + ] + self.activation = ACT2FN[config.hidden_act] + + def call(self, hidden_state): + residual = hidden_state + for layer_module in self.layers: + hidden_state = layer_module(hidden_state) + residual = self.shortcut(residual) + hidden_state += residual + hidden_state = self.activation(hidden_state) + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "shortcut", None) is not None: + with tf.name_scope(self.shortcut.name): + self.shortcut.build(None) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFRegNetYLayer(keras.layers.Layer): + """ + RegNet's Y layer: an X layer with Squeeze and Excitation. + """ + + def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): + super().__init__(**kwargs) + should_apply_shortcut = in_channels != out_channels or stride != 1 + groups = max(1, out_channels // config.groups_width) + self.shortcut = ( + TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") + if should_apply_shortcut + else keras.layers.Activation("linear", name="shortcut") + ) + self.layers = [ + TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), + TFRegNetConvLayer( + out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" + ), + TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"), + TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.3"), + ] + self.activation = ACT2FN[config.hidden_act] + + def call(self, hidden_state): + residual = hidden_state + for layer_module in self.layers: + hidden_state = layer_module(hidden_state) + residual = self.shortcut(residual) + hidden_state += residual + hidden_state = self.activation(hidden_state) + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "shortcut", None) is not None: + with tf.name_scope(self.shortcut.name): + self.shortcut.build(None) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFRegNetStage(keras.layers.Layer): + """ + A RegNet stage composed by stacked layers. 
+ """ + + def __init__( + self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs + ): + super().__init__(**kwargs) + + layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer + self.layers = [ + # downsampling is done in the first layer with stride of 2 + layer(config, in_channels, out_channels, stride=stride, name="layers.0"), + *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)], + ] + + def call(self, hidden_state): + for layer_module in self.layers: + hidden_state = layer_module(hidden_state) + return hidden_state + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFRegNetEncoder(keras.layers.Layer): + def __init__(self, config: RegNetConfig, **kwargs): + super().__init__(**kwargs) + self.stages = [] + # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input + self.stages.append( + TFRegNetStage( + config, + config.embedding_size, + config.hidden_sizes[0], + stride=2 if config.downsample_in_first_stage else 1, + depth=config.depths[0], + name="stages.0", + ) + ) + in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) + for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])): + self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")) + + def call( + self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True + ) -> TFBaseModelOutputWithNoAttention: + hidden_states = () if output_hidden_states else None + + for stage_module in self.stages: + if output_hidden_states: + hidden_states = hidden_states + (hidden_state,) + + hidden_state = stage_module(hidden_state) + + if output_hidden_states: + hidden_states = hidden_states + (hidden_state,) + + if not return_dict: + return tuple(v for v in [hidden_state, hidden_states] if v is not None) + + return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + for stage in self.stages: + with tf.name_scope(stage.name): + stage.build(None) + + +@keras_serializable +class TFRegNetMainLayer(keras.layers.Layer): + config_class = RegNetConfig + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.config = config + self.embedder = TFRegNetEmbeddings(config, name="embedder") + self.encoder = TFRegNetEncoder(config, name="encoder") + self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") + + @unpack_inputs + def call( + self, + pixel_values: tf.Tensor, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> TFBaseModelOutputWithPoolingAndNoAttention: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + embedding_output = self.embedder(pixel_values, training=training) + + encoder_outputs = self.encoder( + embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = 
self.pooler(last_hidden_state) + + # Change to NCHW output format have uniformity in the modules + pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2)) + last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) + + # Change the other hidden state outputs to NCHW as well + if output_hidden_states: + hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return TFBaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embedder", None) is not None: + with tf.name_scope(self.embedder.name): + self.embedder.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "pooler", None) is not None: + with tf.name_scope(self.pooler.name): + self.pooler.build((None, None, None, None)) + + +class TFRegNetPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RegNetConfig + base_model_prefix = "regnet" + main_input_name = "pixel_values" + + @property + def input_signature(self): + return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)} + + +REGNET_START_DOCSTRING = r""" + This model is a Tensorflow + [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a + regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +REGNET_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`ConveNextImageProcessor.__call__`] for details. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare RegNet model outputting raw features without any specific head on top.", + REGNET_START_DOCSTRING, +) +class TFRegNetModel(TFRegNetPreTrainedModel): + def __init__(self, config: RegNetConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.regnet = TFRegNetMainLayer(config, name="regnet") + + @unpack_inputs + @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPoolingAndNoAttention, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def call( + self, + pixel_values: tf.Tensor, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.regnet( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + if not return_dict: + return (outputs[0],) + outputs[1:] + + return TFBaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=outputs.last_hidden_state, + pooler_output=outputs.pooler_output, + hidden_states=outputs.hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "regnet", None) is not None: + with tf.name_scope(self.regnet.name): + self.regnet.build(None) + + +@add_start_docstrings( + """ + RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. + """, + REGNET_START_DOCSTRING, +) +class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config: RegNetConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + self.regnet = TFRegNetMainLayer(config, name="regnet") + # classification head + self.classifier = [ + keras.layers.Flatten(), + keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity, + ] + + @unpack_inputs + @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=TFSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def call( + self, + pixel_values: Optional[tf.Tensor] = None, + labels: Optional[tf.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.regnet( + pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training + ) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + flattened_output = self.classifier[0](pooled_output) + logits = self.classifier[1](flattened_output) + + loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "regnet", None) is not None: + with tf.name_scope(self.regnet.name): + self.regnet.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier[1].name): + self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]]) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0fe3bdc82a9a53c444ad25e8f749451f85f839b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__init__.py @@ -0,0 +1,84 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
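As a quick reference for the TF RegNet classification head defined above, here is a minimal inference sketch. It is not part of the diffed source; the checkpoint name `facebook/regnet-y-040` is assumed to be a valid Hub checkpoint, and any TF-compatible RegNet classification checkpoint would work the same way.

```python
# Hedged usage sketch for TFRegNetForImageClassification; checkpoint name is illustrative.
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

# The processor returns channels-first pixel values, matching the model's
# `pixel_values` input signature shown above: (batch, num_channels, 224, 224).
inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape: (batch_size, config.num_labels)

predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])
```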
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_torch_available, +) + + +_import_structure = { + "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"] + +if TYPE_CHECKING: + from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/configuration_vision_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/configuration_vision_encoder_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b526ee146242757c87f2ce45d18f7caed84a9a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/configuration_vision_encoder_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_tf_vision_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_tf_vision_encoder_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aeac8e6dc3f6e149963c32fa6f2b38e9836b9b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_tf_vision_encoder_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_vision_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_vision_encoder_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70989edbef025803c38381842c689adf720912c7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/__pycache__/modeling_vision_encoder_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..a4aa663f98526fa030f42584ce11da9852d74050 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py @@ -0,0 +1,209 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging +from ..auto.configuration_auto import AutoConfig + + +if TYPE_CHECKING: + from ... import PreTrainedTokenizerBase, TensorType + +logger = logging.get_logger(__name__) + + +class VisionEncoderDecoderConfig(PretrainedConfig): + r""" + [`VisionEncoderDecoderConfig`] is the configuration class to store the configuration of a + [`VisionEncoderDecoderModel`]. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the + specified arguments, defining the encoder and decoder configs. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + kwargs (*optional*): + Dictionary of keyword arguments. Notably: + + - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines + the encoder config. + - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines + the decoder config. 
+ + Examples: + + ```python + >>> from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel + + >>> # Initializing a ViT & BERT style configuration + >>> config_encoder = ViTConfig() + >>> config_decoder = BertConfig() + + >>> config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) + + >>> # Initializing a ViTBert model (with random weights) from ViT & google-bert/bert-base-uncased style configurations + >>> model = VisionEncoderDecoderModel(config=config) + + >>> # Accessing the model configuration + >>> config_encoder = model.config.encoder + >>> config_decoder = model.config.decoder + >>> # set decoder config to causal lm + >>> config_decoder.is_decoder = True + >>> config_decoder.add_cross_attention = True + + >>> # Saving the model, including its configuration + >>> model.save_pretrained("my-model") + + >>> # loading model and config from pretrained folder + >>> encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained("my-model") + >>> model = VisionEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) + ```""" + + model_type = "vision-encoder-decoder" + is_composition = True + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if "encoder" not in kwargs or "decoder" not in kwargs: + raise ValueError( + f"A configuration of type {self.model_type} cannot be instantiated because " + f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" + ) + + encoder_config = kwargs.pop("encoder") + encoder_model_type = encoder_config.pop("model_type") + decoder_config = kwargs.pop("decoder") + decoder_model_type = decoder_config.pop("model_type") + + self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) + self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) + self.is_encoder_decoder = True + + @classmethod + def from_encoder_decoder_configs( + cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs + ) -> PretrainedConfig: + r""" + Instantiate a [`VisionEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model + configuration and decoder model configuration.
+ + Returns: + [`VisionEncoderDecoderConfig`]: An instance of a configuration object + """ + logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") + decoder_config.is_decoder = True + decoder_config.add_cross_attention = True + + return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) + + +class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}}) + + +class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = OrderedDict() + common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"} + + return common_inputs + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + import torch + + common_inputs = OrderedDict() + + dummy_input = super().generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + batch, encoder_sequence = dummy_input["input_ids"].shape + encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size) + common_inputs["input_ids"] = dummy_input.pop("input_ids") + common_inputs["attention_mask"] = dummy_input.pop("attention_mask") + common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape) + + return common_inputs + + +class VisionEncoderDecoderOnnxConfig(OnnxConfig): + @property + def inputs(self) -> None: + pass + + def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig: + r""" + Returns ONNX encoder config for `VisionEncoderDecoder` model. + + Args: + encoder_config (`PretrainedConfig`): + The encoder model's configuration to use when exporting to ONNX. + + Returns: + [`VisionEncoderDecoderEncoderOnnxConfig`]: An instance of the ONNX configuration object + """ + return VisionEncoderDecoderEncoderOnnxConfig(encoder_config) + + def get_decoder_config( + self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default" + ) -> OnnxConfig: + r""" + Returns ONNX decoder config for `VisionEncoderDecoder` model. + + Args: + encoder_config (`PretrainedConfig`): + The encoder model's configuration to use when exporting to ONNX. + decoder_config (`PretrainedConfig`): + The decoder model's configuration to use when exporting to ONNX + feature (`str`, *optional*): + The type of feature to export the model with. + + Returns: + [`VisionEncoderDecoderDecoderOnnxConfig`]: An instance of the ONNX configuration object. 
+ """ + decoder_config.encoder_hidden_size = encoder_config.hidden_size + return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..987c9a1afa3d1904d05ae52a622e32619022efa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py @@ -0,0 +1,862 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Classes to support Vision-Encoder-Text-Decoder architectures""" + + +import os +from typing import Optional, Tuple, Union + +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax +from jax.random import PRNGKey + +from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput +from ...modeling_flax_utils import FlaxPreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from ..auto.configuration_auto import AutoConfig +from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM +from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig" + +VISION_ENCODER_DECODER_START_DOCSTRING = r""" + This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model + as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via + [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`] + function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream + generative task, like image captioning. + + The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation + tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation + Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi + Zhou, Wei Li, Peter J. Liu. + + Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained + Models](https://arxiv.org/abs/2109.10282) it is shown how leveraging large pretrained vision models for optical + character recognition (OCR) yields a significant performance improvement. 
+ + After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any + other models (see the examples for more information). + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Parameters: + config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using the vision model's image processor. For example, using + [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + decoder_position_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.decoder.max_position_embeddings - 1]`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple. +""" + +VISION_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. 
Pixel values can be obtained using the vision model's image processor. For example, using + [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple. +""" + +VISION_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r""" + Args: + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + For sequence to sequence training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is + provided, the model will create this tensor by shifting the `input_ids` to the right for denoising + pre-training. + encoder_outputs (`tuple(tuple(jnp.ndarray)`): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + decoder_position_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.decoder.max_position_embeddings - 1]`. + past_key_values (`Dict[str, jnp.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a + plain tuple. +""" + + +class FlaxVisionEncoderDecoderModule(nn.Module): + config: VisionEncoderDecoderConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + encoder_config = self.config.encoder + decoder_config = self.config.decoder + + # Copied from `modeling_hybrid_clip.py` with modifications. 
+ from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING + + encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class + decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class + + self.encoder = encoder_module(encoder_config, dtype=self.dtype) + self.decoder = decoder_module(decoder_config, dtype=self.dtype) + + # encoder outputs might need to be projected to different dimension for decoder + if ( + self.encoder.config.hidden_size != self.decoder.config.hidden_size + and self.decoder.config.cross_attention_hidden_size is None + ): + self.enc_to_dec_proj = nn.Dense( + self.decoder.config.hidden_size, + kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range), + dtype=self.dtype, + ) + else: + self.enc_to_dec_proj = None + + def _get_encoder_module(self): + return self.encoder + + def _get_projection_module(self): + return self.enc_to_dec_proj + + def _get_decoder_module(self): + return self.decoder + + def __call__( + self, + pixel_values, + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + encoder_outputs = self.encoder( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + encoder_hidden_states = encoder_outputs[0] + + # optionally project encoder_hidden_states + if self.enc_to_dec_proj is not None: + encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) + + # The advantage of explicitly setting this is TPU XLA compiler knows as soon as possible what shape this + # variable has and can better optimize. Also passing `None` can lead to some problems when jitting the model. + # In Flax/JAX, we only want to pass `None` for non-tensor function inputs. For all tensor function inputs, we + # should always pass a tensor and not `None`. 
+ batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return FlaxSeq2SeqLMOutput( + logits=decoder_outputs.logits, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING) +class FlaxVisionEncoderDecoderModel(FlaxPreTrainedModel): + r""" + [`FlaxVisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture + with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and + another one as decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method + for the encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder. + """ + + config_class = VisionEncoderDecoderConfig + base_model_prefix = "vision_encoder_decoder" + main_input_name = "pixel_values" + module_class = FlaxVisionEncoderDecoderModule + + def __init__( + self, + config: VisionEncoderDecoderConfig, + input_shape: Optional[Tuple] = None, + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + if not _do_init: + raise ValueError( + "`FlaxVisionEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`." + ) + + if input_shape is None: + num_channels = getattr(config.encoder, "num_channels", 3) + input_shape = ( + (1, config.encoder.image_size, config.encoder.image_size, num_channels), + (1, 1), + ) + + if config.decoder.cross_attention_hidden_size is not None: + if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: + raise ValueError( + "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" + f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" + f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" + " `config.encoder.hidden_size`." 
+ ) + + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + encoder_input_shape, decoder_input_shape = input_shape + + # init input tensors + pixel_values = jnp.zeros(encoder_input_shape, dtype=self.dtype) + decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4") + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + + batch_size, _, _, _ = pixel_values.shape + decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape + if not decoder_batch_size == batch_size: + raise ValueError( + f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder " + f"and {decoder_batch_size} for decoder." + ) + decoder_position_ids = jnp.broadcast_to( + jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length) + ) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init( + rngs, + pixel_values, + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + )["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length, encoder_outputs): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. + encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): + `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: + `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) + is a sequence of hidden-states at the output of the last layer of the encoder. Used in the + cross-attention of the decoder. 
+ """ + # init input variables to retrieve cache + decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + decoder_position_ids = jnp.broadcast_to( + jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape + ) + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + return decoder_module( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + **kwargs, + ) + + init_variables = self.module.init( + jax.random.PRNGKey(0), + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + init_cache=True, + method=_decoder_forward, # we only need to call the decoder to init the cache + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings(VISION_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC) + def encode( + self, + pixel_values: jnp.ndarray, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoImageProcessor, FlaxVisionEncoderDecoderModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + + >>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized + >>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "openai-community/gpt2" + ... ) + + >>> pixel_values = image_processor(images=image, return_tensors="np").pixel_values + >>> encoder_outputs = model.encode(pixel_values) + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # `FlaxViTModel` expects channel first format, but `FlaxViTModule` expects channel last format. + # Currently, we assume this holds for all Flax vision models, and perform a transpose here. 
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + def _encoder_forward(module, pixel_values, **kwargs): + encode_module = module._get_encoder_module() + return encode_module(pixel_values, **kwargs) + + outputs = self.module.apply( + {"params": params or self.params}, + pixel_values=jnp.array(pixel_values, dtype=self.dtype), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + method=_encoder_forward, + ) + + if return_dict: + outputs = FlaxBaseModelOutput( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + return outputs + + @add_start_docstrings(VISION_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def decode( + self, + decoder_input_ids, + encoder_outputs, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoImageProcessor, FlaxVisionEncoderDecoderModel + >>> import jax.numpy as jnp + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + + >>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized + >>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "openai-community/gpt2" + ... 
) + + >>> pixel_values = image_processor(images=image, return_tensors="np").pixel_values + >>> encoder_outputs = model.encode(pixel_values) + + >>> decoder_start_token_id = model.config.decoder.bos_token_id + >>> decoder_input_ids = jnp.ones((pixel_values.shape[0], 1), dtype="i4") * decoder_start_token_id + + >>> outputs = model.decode(decoder_input_ids, encoder_outputs) + >>> logits = outputs.logits + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + encoder_hidden_states = encoder_outputs[0] + + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = decoder_input_ids.shape + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + if decoder_position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") + + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be + # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that + # it can be changed by FlaxBartAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + def _decoder_forward( + module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs + ): + projection_module = module._get_projection_module() + decoder_module = module._get_decoder_module() + + # optionally project encoder_hidden_states + if projection_module is not None: + encoder_hidden_states = projection_module(encoder_hidden_states) + + return decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + encoder_hidden_states, + **kwargs, + ) + + outputs = self.module.apply( + inputs, + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + mutable=mutable, + method=_decoder_forward, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past = outputs + outputs["past_key_values"] = unfreeze(past["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past = outputs + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def __call__( + self, + pixel_values: 
jnp.ndarray, + decoder_input_ids: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import FlaxVisionEncoderDecoderModel, AutoImageProcessor, AutoTokenizer + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + + >>> # load output tokenizer + >>> tokenizer_output = AutoTokenizer.from_pretrained("openai-community/gpt2") + + >>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized + >>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "openai-community/gpt2" + ... ) + + >>> pixel_values = image_processor(images=image, return_tensors="np").pixel_values + + >>> # use GPT2's eos_token as the pad as well as eos token + >>> model.config.eos_token_id = model.config.decoder.eos_token_id + >>> model.config.pad_token_id = model.config.eos_token_id + + >>> # generation + >>> sequences = model.generate(pixel_values, num_beams=4, max_length=12).sequences + + >>> captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=True) + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # prepare encoder inputs + + # `FlaxViTModel` expects channel first format, but `FlaxViTModule` expects channel last format. + # Currently, we assume this holds for all Flax vision models, and perform a transpose here. 
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # prepare decoder inputs + if decoder_input_ids is None: + raise ValueError("`decoder_input_ids` can't be `None`.") + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + if decoder_position_ids is None: + batch_size, sequence_length = decoder_input_ids.shape + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} + + return self.module.apply( + {"params": params or self.params}, + pixel_values=jnp.array(pixel_values, dtype=self.dtype), + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + max_length, + decoder_attention_mask: Optional[jax.Array] = None, + encoder_outputs=None, + **kwargs, + ): + # initializing the cache + batch_size, seq_length = decoder_input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since the decoder uses a causal mask, those positions are masked anyways. + # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if decoder_attention_mask is not None: + decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) + else: + decoder_position_ids = jnp.broadcast_to( + jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) + ) + + return { + "past_key_values": past_key_values, + "encoder_outputs": encoder_outputs, + "decoder_attention_mask": extended_attention_mask, + "decoder_position_ids": decoder_position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 + return model_kwargs + + @classmethod + def from_encoder_decoder_pretrained( + cls, + encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + *model_args, + **kwargs, + ) -> FlaxPreTrainedModel: + r""" + Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model + checkpoints. + + Params: + encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*): + Information necessary to initiate the encoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An + example is `google/vit-base-patch16-224-in21k`. + - A path to a *directory* containing model weights saved using + [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. 
+ + decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`): + Information necessary to initiate the decoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + + model_args (remaining positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., + `output_attentions=True`). + + - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. + - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. + - To update the parent model configuration, do not use a prefix for each configuration parameter. + + Behaves differently depending on whether a `config` is provided or automatically loaded. + + Example: + + ```python + >>> from transformers import FlaxVisionEncoderDecoderModel + + >>> # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized + >>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "openai-community/gpt2" + ... ) + >>> # saving model after fine-tuning + >>> model.save_pretrained("./vit-gpt2") + >>> # load fine-tuned model + >>> model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2") + ```""" + + kwargs_encoder = { + argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") + } + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + # remove encoder, decoder kwargs from kwargs + for key in kwargs_encoder.keys(): + del kwargs["encoder_" + key] + for key in kwargs_decoder.keys(): + del kwargs["decoder_" + key] + + # Load and initialize the encoder and decoder + # The distinction between encoder and decoder at the model level is made + # by the value of the flag `is_decoder` that we need to set correctly. + encoder = kwargs_encoder.pop("model", None) + if encoder is None: + if encoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_encoder: + encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path) + if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: + logger.info( + f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model " + "from a decoder model. Cross-attention and causal mask are disabled." + ) + encoder_config.is_decoder = False + encoder_config.add_cross_attention = False + + kwargs_encoder["config"] = encoder_config + + encoder = FlaxAutoModel.from_pretrained( + encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder + ) + + decoder = kwargs_decoder.pop("model", None) + if decoder is None: + if decoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " + "to be defined."
+ ) + + if "config" not in kwargs_decoder: + decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) + if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: + logger.info( + f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" + f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" + f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." + ) + decoder_config.is_decoder = True + decoder_config.add_cross_attention = True + + kwargs_decoder["config"] = decoder_config + + if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: + logger.warning( + f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. " + f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " + "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " + "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " + "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" + ) + + decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) + + # instantiate config with corresponding kwargs + dtype = kwargs.pop("dtype", jnp.float32) + config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) + + # init model + model = cls(config, dtype=dtype) + model.params["encoder"] = encoder.params + model.params["decoder"] = decoder.params + + return model diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..75ff2dbd82e48bd126b789bbc3f58ad8fbbf14e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -0,0 +1,698 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
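Before the TF implementation that follows, here is a standalone sketch of the `encoder_*` / `decoder_*` keyword routing performed by `from_encoder_decoder_pretrained` above. It is not part of the diffed source; the keyword names are illustrative, and the filtering is written as comprehensions that end in the same state as the `del` loop in the source.

```python
# Standalone sketch of the prefix splitting used by `from_encoder_decoder_pretrained`.
# Keyword names below are illustrative only.
kwargs = {"encoder_hidden_dropout_prob": 0.2, "decoder_use_cache": False, "tie_word_embeddings": False}

kwargs_encoder = {k[len("encoder_"):]: v for k, v in kwargs.items() if k.startswith("encoder_")}
kwargs_decoder = {k[len("decoder_"):]: v for k, v in kwargs.items() if k.startswith("decoder_")}
kwargs = {k: v for k, v in kwargs.items() if not k.startswith(("encoder_", "decoder_"))}

print(kwargs_encoder)  # {'hidden_dropout_prob': 0.2}   -> forwarded to the encoder's from_pretrained
print(kwargs_decoder)  # {'use_cache': False}           -> forwarded to the decoder's from_pretrained
print(kwargs)          # {'tie_word_embeddings': False} -> used for the combined VisionEncoderDecoderConfig
```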
+""" Classes to support TF Vision-Encoder-Text-Decoder architectures""" + + +from __future__ import annotations + +import re +import warnings +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...configuration_utils import PretrainedConfig +from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput +from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, keras, unpack_inputs +from ...tf_utils import shape_list +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ..auto.configuration_auto import AutoConfig +from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM +from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig" + +DEPRECATION_WARNING = ( + "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the" + " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if" + " fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the" + " labels, no need to pass them yourself anymore." +) + +VISION_ENCODER_DECODER_START_DOCSTRING = r""" + This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model + as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via + [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`] + function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream + generative task, like image captioning. + + The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation + tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation + Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi + Zhou, Wei Li, Peter J. Liu. + + Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained + Models](https://arxiv.org/abs/2109.10282) it is shown how leveraging large pretrained vision models for optical + character recognition (OCR) yields a significant performance improvement. + + After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any + other models (see the examples for more information). + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + +VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using the vision's model's image processor. For example, using + [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. + decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + Provide for sequence to sequence training to the decoder. Indices can be obtained using + [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for + details. + decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): + This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output + of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `({0})`. + decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. This is useful if you want more control over how to convert `decoder_input_ids` indices + into associated vectors than the model's internal embedding lookup matrix. + labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0, + ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + + - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. + - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. +""" + + +# Copied from transformers.models.encoder_decoder.modeling_tf_encoder_decoder.shift_tokens_right +def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): + if pad_token_id is None: + raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + + if decoder_start_token_id is None: + raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + + start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids = tf.where( + shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + ) + + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) + + return shifted_input_ids + + +@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING) +class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss): + r""" + [`TFVisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture + with one of the base vision model classes of the library as encoder and another one of the base model classes as + decoder when created with the [`~TFAutoModel.from_pretrained`] class method for the encoder and + [`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder. 
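    To make the label-to-decoder-input conversion performed by `shift_tokens_right` above concrete, a small hedged example with made-up token ids (`pad_token_id=0` and `decoder_start_token_id=2` are assumptions for illustration only):

    ```python
    >>> import tensorflow as tf

    >>> labels = tf.constant([[5, 6, 7, -100]])  # -100 marks an ignored label position
    >>> shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
    >>> # -> [[2, 5, 6, 7]]: the start token is prepended, the labels shift one position to
    >>> #    the right, and any remaining -100 entries are replaced by the pad token id
    ```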
+ """ + + config_class = VisionEncoderDecoderConfig + base_model_prefix = "vision_encoder_decoder" + load_weight_prefix = "tf_vision_encoder_decoder_model" + main_input_name = "pixel_values" + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + encoder: Optional[TFPreTrainedModel] = None, + decoder: Optional[TFPreTrainedModel] = None, + ): + if config is None and (encoder is None or decoder is None): + raise ValueError("Either a configuration or an encoder and a decoder has to be provided.") + if config is None: + config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) + else: + if not isinstance(config, self.config_class): + raise ValueError(f"config: {config} has to be of type {self.config_class}") + + if config.decoder.cross_attention_hidden_size is not None: + if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: + raise ValueError( + "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" + f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" + f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" + " `config.encoder.hidden_size`." + ) + + # initialize with config + super().__init__(config) + + if encoder is None: + encoder = TFAutoModel.from_config(config.encoder, name="encoder") + + if decoder is None: + decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder") + + self.encoder = encoder + self.decoder = decoder + + if self.encoder.config.to_dict() != self.config.encoder.to_dict(): + logger.warning( + f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:" + f" {self.config.encoder}" + ) + if self.decoder.config.to_dict() != self.config.decoder.to_dict(): + logger.warning( + f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" + f" {self.config.decoder}" + ) + + # make sure that the individual model's config refers to the shared config + # so that the updates to the config will be synced + self.encoder.config = self.config.encoder + self.decoder.config = self.config.decoder + + # encoder outputs might need to be projected to different dimension for decoder + if ( + self.encoder.config.hidden_size != self.decoder.config.hidden_size + and self.decoder.config.cross_attention_hidden_size is None + ): + self.enc_to_dec_proj = keras.layers.Dense( + units=self.decoder.config.hidden_size, + kernel_initializer=get_initializer(config.encoder.initializer_range), + name="enc_to_dec_proj", + ) + + if self.encoder.get_output_embeddings() is not None: + raise ValueError( + f"The encoder {self.encoder} should not have a LM Head. 
Please use a model without LM Head" + ) + + @property + def input_signature(self): + vision_config = self.config.encoder + if hasattr(vision_config, "vision_config"): + vision_config = vision_config.vision_config + if hasattr(vision_config, "image_size"): + image_size = vision_config.image_size + else: + image_size = vision_config.input_size + return { + "pixel_values": tf.TensorSpec( + shape=( + None, + vision_config.num_channels, + image_size, + image_size, + ), + dtype=tf.float32, + ), + "decoder_input_ids": tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="decoder_input_ids"), + } + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def get_input_embeddings(self): + return self.encoder.get_input_embeddings() + + def get_output_embeddings(self): + return self.decoder.get_output_embeddings() + + def set_output_embeddings(self, new_embeddings): + return self.decoder.set_output_embeddings(new_embeddings) + + def tf_to_pt_weight_rename(self, tf_weight): + # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models + # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal. + # However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption + # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's + # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name! + + # This override is only needed in the case where we're crossloading weights from PT. However, since weights are + # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file. + # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it + # or not. + encoder_model_type = self.config.encoder.model_type + if "encoder" in tf_weight and "decoder" not in tf_weight: + return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),) + else: + return (tf_weight,) + + @classmethod + def from_encoder_decoder_pretrained( + cls, + encoder_pretrained_model_name_or_path: str = None, + decoder_pretrained_model_name_or_path: str = None, + *model_args, + **kwargs, + ) -> TFPreTrainedModel: + r""" + Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model + checkpoints. + + + Params: + encoder_pretrained_model_name_or_path (`str`, *optional*): + Information necessary to initiate the encoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An + example is `google/vit-base-patch16-224-in21k`. + - A path to a *directory* containing model weights saved using + [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case, + `encoder_from_pt` should be set to `True`. + + decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to *None*): + Information necessary to initiate the decoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). 
In this case, + `decoder_from_pt` should be set to `True`. + + model_args (remaining positional arguments, *optional*): + All remaning positional arguments will be passed to the underlying model's `__init__` method. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., + `output_attentions=True`). + + - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. + - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. + - To update the parent model configuration, do not use a prefix for each configuration parameter. + + Behaves differently depending on whether a `config` is provided or automatically loaded. + + Example: + + ```python + >>> from transformers import TFVisionEncoderDecoderModel + + >>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized + >>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "google-bert/bert-base-uncased" + ... ) + >>> # saving model after fine-tuning + >>> model.save_pretrained("./vit-bert") + >>> # load fine-tuned model + >>> model = TFVisionEncoderDecoderModel.from_pretrained("./vit-bert") + ```""" + + kwargs_encoder = { + argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") + } + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + # remove encoder, decoder kwargs from kwargs + for key in kwargs_encoder.keys(): + del kwargs["encoder_" + key] + for key in kwargs_decoder.keys(): + del kwargs["decoder_" + key] + + # Load and initialize the encoder and decoder + # The distinction between encoder and decoder at the model level is made + # by the value of the flag `is_decoder` that we need to set correctly. + encoder = kwargs_encoder.pop("model", None) + if encoder is None: + if encoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_encoder: + encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path) + if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: + logger.info( + f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model " + "from a decoder model. Cross-attention and casual mask are disabled." + ) + encoder_config.is_decoder = False + encoder_config.add_cross_attention = False + + kwargs_encoder["config"] = encoder_config + + kwargs_encoder["name"] = "encoder" + kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix + encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) + + decoder = kwargs_decoder.pop("model", None) + if decoder is None: + if decoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " + "to be defined." 
+ ) + + if "config" not in kwargs_decoder: + decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) + if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: + logger.info( + f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" + f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" + f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." + ) + decoder_config.is_decoder = True + decoder_config.add_cross_attention = True + + kwargs_decoder["config"] = decoder_config + + if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: + logger.warning( + f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. " + f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " + "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " + "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " + "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" + ) + + kwargs_decoder["name"] = "decoder" + kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix + decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) + + # Make sure these 2 `keras.Model` have fixed names so `from_pretrained` could load model weights correctly. + if encoder.name != "encoder": + raise ValueError("encoder model must be created with the name `encoder`.") + if decoder.name != "decoder": + raise ValueError("decoder model must be created with the name `decoder`.") + + # instantiate config with corresponding kwargs + config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) + return cls(encoder=encoder, decoder=decoder, config=config) + + @unpack_inputs + @add_start_docstrings_to_model_forward( + VISION_ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + pixel_values: np.ndarray | tf.Tensor | None = None, + decoder_input_ids: np.ndarray | tf.Tensor | None = None, + decoder_attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, + labels: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + **kwargs, + ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, AutoTokenizer, TFVisionEncoderDecoderModel + >>> from PIL import Image + >>> import requests + + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + >>> decoder_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + + >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized + >>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... 
"google/vit-base-patch16-224-in21k", "openai-community/gpt2" + ... ) + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> img = Image.open(requests.get(url, stream=True).raw) + + >>> # forward + >>> pixel_values = image_processor(images=img, return_tensors="tf").pixel_values # Batch size 1 + >>> decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids # Batch size 1 + >>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) + + >>> # training + >>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) + >>> loss, logits = outputs.loss, outputs.logits + + >>> # save and load from pretrained + >>> model.save_pretrained("vit-gpt2") + >>> model = TFVisionEncoderDecoderModel.from_pretrained("vit-gpt2") + + >>> # generation + >>> generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id) + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + # Let the user be responsible for the expected format. + if encoder_outputs is not None: + if return_dict and not isinstance(encoder_outputs, ModelOutput): + raise ValueError( + "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of " + f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`." + ) + + if encoder_outputs is None: + encoder_inputs = { + "input_ids": pixel_values, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "training": training, + } + + # Add arguments to encoder from `kwargs_encoder` + encoder_inputs.update(kwargs_encoder) + + if "input_ids" in encoder_inputs: + encoder_inputs["pixel_values"] = encoder_inputs.pop("input_ids") + + if encoder_inputs["pixel_values"] is None: + raise ValueError("You have to specify pixel_values") + + # Handle the case where the inputs are passed as a single dict which contains `labels`. + # The `labels` shouldn't be passed to `self.encoder` below, because it is a based model without this + # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`). + if "labels" in encoder_inputs: + labels = encoder_inputs.pop("labels") + + # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`. + if "decoder_input_ids" in encoder_inputs: + decoder_input_ids = encoder_inputs.pop("decoder_input_ids") + # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`. 
+ if "decoder_attention_mask" in encoder_inputs: + decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask") + + encoder_outputs = self.encoder(**encoder_inputs) + + encoder_hidden_states = encoder_outputs[0] + + # optionally project encoder_hidden_states + if ( + self.encoder.config.hidden_size != self.decoder.config.hidden_size + and self.decoder.config.cross_attention_hidden_size is None + ): + encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) + + if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + batch_size, sequence_length = shape_list(encoder_hidden_states)[:2] + encoder_attention_mask = tf.ones(shape=(batch_size, sequence_length), dtype=tf.int32) + + decoder_inputs = { + "input_ids": decoder_input_ids, + "attention_mask": decoder_attention_mask, + "encoder_hidden_states": encoder_hidden_states, + "encoder_attention_mask": encoder_attention_mask, + "inputs_embeds": decoder_inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "use_cache": use_cache, + "past_key_values": past_key_values, + "return_dict": return_dict, + "training": training, + } + + # Add arguments to decoder from `kwargs_decoder` + decoder_inputs.update(kwargs_decoder) + + decoder_outputs = self.decoder(**decoder_inputs) + + logits = decoder_outputs[0] + + # Compute loss independent from decoder (as some shift the logits inside them) + loss = None + if labels is not None: + warnings.warn(DEPRECATION_WARNING, FutureWarning) + loss = self.hf_compute_loss(labels, logits) + + if not return_dict: + past_key_values = None + if use_cache: + past_key_values = decoder_outputs[1] + # The starting index of the remaining elements in `decoder_outputs` + start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)]) + + if not isinstance(encoder_outputs, tuple): + encoder_outputs = encoder_outputs.to_tuple() + output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs + output = tuple([x for x in output if x is not None]) + return output + + return TFSeq2SeqLMOutput( + loss=loss, + logits=decoder_outputs.logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.decoder.use_cache else None + dec_hs = ( + tf.convert_to_tensor(output.decoder_hidden_states) if self.config.decoder.output_hidden_states else None + ) + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.decoder.output_attentions else None + enc_hs = ( + tf.convert_to_tensor(output.encoder_hidden_states) if self.config.encoder.output_hidden_states else None + ) + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.encoder.output_attentions else None + cross_attns = ( + tf.convert_to_tensor(output.cross_attentions) + if self.config.decoder.output_attentions and output.cross_attentions is not None + else None + ) + + return TFSeq2SeqLMOutput( + logits=output.logits, + past_key_values=pkv, + 
decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + cross_attentions=cross_attns, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs + ): + decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) + decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None + past_key_values = decoder_inputs.get("past_key_values") + input_dict = { + "pixel_values": None, # needs to be passed to make Keras.layer.__call__ happy + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "decoder_input_ids": decoder_inputs["input_ids"], + # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete + "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]), + "past_key_values": past_key_values, + "use_cache": use_cache, + } + return input_dict + + def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + def resize_token_embeddings(self, *args, **kwargs): + raise NotImplementedError( + "Resizing the embedding layers via the TFVisionEncoderDecoderModel directly is not supported. " + "Please use the respective methods of the wrapped objects (model.decoder.resize_token_embeddings(...))" + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "enc_to_dec_proj", None) is not None: + with tf.name_scope(self.enc_to_dec_proj.name): + self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size]) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "decoder", None) is not None: + with tf.name_scope(self.decoder.name): + self.decoder.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..0bdf76044153b164cdcfdb76be181df19463cdaf --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -0,0 +1,668 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
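For the TF class completed above, a hedged end-to-end captioning sketch showing how `generate` ties together `prepare_inputs_for_generation` and the encoder/decoder call; the checkpoint id is borrowed from the PyTorch example later in this file and is assumed to ship TF-compatible weights:

```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, AutoTokenizer, TFVisionEncoderDecoderModel

>>> repo = "ydshieh/vit-gpt2-coco-en"  # assumed to provide TF weights; otherwise pass `from_pt=True`
>>> image_processor = AutoImageProcessor.from_pretrained(repo)
>>> tokenizer = AutoTokenizer.from_pretrained(repo)
>>> model = TFVisionEncoderDecoderModel.from_pretrained(repo)

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

>>> generated_ids = model.generate(pixel_values, max_length=16, num_beams=4)
>>> caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```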
+""" Classes to support Vision-Encoder-Text-Decoder architectures""" + + +import gc +import os +import tempfile +from typing import Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...configuration_utils import PretrainedConfig +from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from ..auto.configuration_auto import AutoConfig +from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM +from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig + + +# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + if decoder_start_token_id is None: + raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig" + +VISION_ENCODER_DECODER_START_DOCSTRING = r""" + This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model + as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via + [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`] + function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream + generative task, like image captioning. + + The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation + tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation + Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi + Zhou, Wei Li, Peter J. Liu. + + Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained + Models](https://arxiv.org/abs/2109.10282) it is shown how leveraging large pretrained vision models for optical + character recognition (OCR) yields a significant performance improvement. + + After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any + other models (see the examples for more information). + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. 
+ + Parameters: + config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using an image processor (e.g. if you use ViT as the encoder, + you should use [`AutoImageProcessor`]). See [`ViTImageProcessor.__call__`] for details. + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the + right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. + decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + encoder_outputs (`tuple(torch.FloatTensor)`, *optional*): + This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor + of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the + decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. This is useful if you want more control over how to convert `decoder_input_ids` indices + into associated vectors than the model's internal embedding lookup matrix. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0, + ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + + - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. + - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. +""" + + +@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING) +class VisionEncoderDecoderModel(PreTrainedModel): + r""" + [`VisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with + one of the base vision model classes of the library as encoder and another one as decoder when created with the + :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and + :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. + """ + + config_class = VisionEncoderDecoderConfig + base_model_prefix = "vision_encoder_decoder" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + encoder: Optional[PreTrainedModel] = None, + decoder: Optional[PreTrainedModel] = None, + ): + if config is None and (encoder is None or decoder is None): + raise ValueError("Either a configuration or an encoder and a decoder has to be provided.") + if config is None: + config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) + else: + if not isinstance(config, self.config_class): + raise ValueError(f"Config: {config} has to be of type {self.config_class}") + + if config.decoder.cross_attention_hidden_size is not None: + if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: + raise ValueError( + "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" + f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" + f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" + " `config.encoder.hidden_size`." 
+ ) + + # initialize with config + # make sure input & output embeddings is not tied + config.tie_word_embeddings = False + super().__init__(config) + + if encoder is None: + encoder = AutoModel.from_config(config.encoder) + + if decoder is None: + decoder = AutoModelForCausalLM.from_config(config.decoder) + + self.encoder = encoder + self.decoder = decoder + + if self.encoder.config.to_dict() != self.config.encoder.to_dict(): + logger.warning( + f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:" + f" {self.config.encoder}" + ) + if self.decoder.config.to_dict() != self.config.decoder.to_dict(): + logger.warning( + f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" + f" {self.config.decoder}" + ) + + # make sure that the individual model's config refers to the shared config + # so that the updates to the config will be synced + self.encoder.config = self.config.encoder + self.decoder.config = self.config.decoder + + # encoder outputs might need to be projected to different dimension for decoder + if ( + self.encoder.config.hidden_size != self.decoder.config.hidden_size + and self.decoder.config.cross_attention_hidden_size is None + ): + self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size) + + if self.encoder.get_output_embeddings() is not None: + raise ValueError( + f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" + ) + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def get_output_embeddings(self): + return self.decoder.get_output_embeddings() + + def set_output_embeddings(self, new_embeddings): + return self.decoder.set_output_embeddings(new_embeddings) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + r""" + Example: + + ```python + >>> from transformers import VisionEncoderDecoderModel, AutoImageProcessor, AutoTokenizer + >>> from PIL import Image + >>> import requests + + >>> image_processor = AutoImageProcessor.from_pretrained("ydshieh/vit-gpt2-coco-en") + >>> decoder_tokenizer = AutoTokenizer.from_pretrained("ydshieh/vit-gpt2-coco-en") + >>> model = VisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> img = Image.open(requests.get(url, stream=True).raw) + >>> pixel_values = image_processor(images=img, return_tensors="pt").pixel_values # Batch size 1 + + >>> output_ids = model.generate( + ... pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True + ... ).sequences + + >>> preds = decoder_tokenizer.batch_decode(output_ids, skip_special_tokens=True) + >>> preds = [pred.strip() for pred in preds] + + >>> assert preds == ["a cat laying on top of a couch next to another cat"] + ```""" + + from_tf = kwargs.pop("from_tf", False) + if from_tf: + from transformers import TFVisionEncoderDecoderModel + + # a workaround to load from tensorflow checkpoint + # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get + # extended before saving those components. For example, The name of `_tf_model.encoder.vit` is + # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. 
The + # [top model name] is handled (stripped) by the conversion method, and the former case gets extra `encoder`, + # which should not occur when we want to save the components alone. + # There was a (very) ugly potential fix, which wasn't integrated to `transformers`: see + # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245 + # (the change in `src/transformers/modeling_tf_utils.py`) + _tf_model = TFVisionEncoderDecoderModel.from_pretrained( + pretrained_model_name_or_path, *model_args, **kwargs + ) + config = _tf_model.config + + # Using `tf_model` instead + encoder = _tf_model.encoder.__class__(_tf_model.config.encoder) + decoder = _tf_model.decoder.__class__(_tf_model.config.decoder) + # Make sure models are built + encoder(encoder.dummy_inputs) + decoder(decoder.dummy_inputs) + + # Get the variable correspondence between `_tf_model` and `encoder` and `decoder` + encoder_variables = {} + for v in encoder.trainable_variables + encoder.non_trainable_variables: + encoder_variables["/".join(v.name.split("/")[1:])] = v + decoder_variables = {} + for v in decoder.trainable_variables + decoder.non_trainable_variables: + decoder_variables["/".join(v.name.split("/")[1:])] = v + + _encoder_variables = {} + for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables: + _encoder_variables["/".join(v.name.split("/")[2:])] = v + _decoder_variables = {} + for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables: + _decoder_variables["/".join(v.name.split("/")[2:])] = v + + # assign weight values to `encoder` and `decoder` from `_tf_model` + for name, v in encoder_variables.items(): + v.assign(_encoder_variables[name]) + for name, v in decoder_variables.items(): + v.assign(_decoder_variables[name]) + + tf_model = TFVisionEncoderDecoderModel(encoder=encoder, decoder=decoder) + + # Deal with `enc_to_dec_proj` + if hasattr(_tf_model, "enc_to_dec_proj"): + tf_model(tf_model.dummy_inputs) + tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel) + tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias) + + with tempfile.TemporaryDirectory() as tmpdirname: + encoder_dir = os.path.join(tmpdirname, "encoder") + decoder_dir = os.path.join(tmpdirname, "decoder") + tf_model.encoder.save_pretrained(encoder_dir) + tf_model.decoder.save_pretrained(decoder_dir) + + if hasattr(tf_model, "enc_to_dec_proj"): + enc_to_dec_proj_weight = torch.transpose( + torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0 + ) + enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy()) + + del _tf_model + del tf_model + gc.collect() + + model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( + encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True + ) + # This is only for copying some specific attributes of this particular model. + model.config = config + + if hasattr(model, "enc_to_dec_proj"): + model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() + model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() + + return model + + # At the moment fast initialization is not supported for composite models + if kwargs.get("_fast_init", False): + logger.warning( + "Fast initialization is currently not supported for VisionEncoderDecoderModel. " + "Falling back to slow initialization..." 
+ ) + kwargs["_fast_init"] = False + + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + + @classmethod + def from_encoder_decoder_pretrained( + cls, + encoder_pretrained_model_name_or_path: str = None, + decoder_pretrained_model_name_or_path: str = None, + *model_args, + **kwargs, + ) -> PreTrainedModel: + r""" + Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model + checkpoints. + + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you need to first set it back in training mode with `model.train()`. + + Params: + encoder_pretrained_model_name_or_path (`str`, *optional*): + Information necessary to initiate the image encoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An + example is `google/vit-base-patch16-224-in21k`. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + + decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): + Information necessary to initiate the text decoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + + model_args (remaining positional arguments, *optional*): + All remaning positional arguments will be passed to the underlying model's `__init__` method. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., + `output_attentions=True`). + + - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. + - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. + - To update the parent model configuration, do not use a prefix for each configuration parameter. + + Behaves differently depending on whether a `config` is provided or automatically loaded. + + Example: + + ```python + >>> from transformers import VisionEncoderDecoderModel + + >>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized + >>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google/vit-base-patch16-224-in21k", "google-bert/bert-base-uncased" + ... 
) + >>> # saving model after fine-tuning + >>> model.save_pretrained("./vit-bert") + >>> # load fine-tuned model + >>> model = VisionEncoderDecoderModel.from_pretrained("./vit-bert") + ```""" + + kwargs_encoder = { + argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") + } + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + # remove encoder, decoder kwargs from kwargs + for key in kwargs_encoder.keys(): + del kwargs["encoder_" + key] + for key in kwargs_decoder.keys(): + del kwargs["decoder_" + key] + + # Load and initialize the encoder and decoder + # The distinction between encoder and decoder at the model level is made + # by the value of the flag `is_decoder` that we need to set correctly. + encoder = kwargs_encoder.pop("model", None) + if encoder is None: + if encoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_encoder: + encoder_config, kwargs_encoder = AutoConfig.from_pretrained( + encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True + ) + + if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: + logger.info( + f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model " + "from a decoder model. Cross-attention and casual mask are disabled." + ) + encoder_config.is_decoder = False + encoder_config.add_cross_attention = False + + kwargs_encoder["config"] = encoder_config + + encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) + + decoder = kwargs_decoder.pop("model", None) + if decoder is None: + if decoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_decoder: + decoder_config, kwargs_decoder = AutoConfig.from_pretrained( + decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True + ) + + if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: + logger.info( + f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" + f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" + f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." + ) + decoder_config.is_decoder = True + decoder_config.add_cross_attention = True + + kwargs_decoder["config"] = decoder_config + + if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: + logger.warning( + f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" + f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " + "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " + "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " + "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" + ) + + decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) + + # instantiate config with corresponding kwargs + config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) + + # make sure input & output embeddings is not tied + config.tie_word_embeddings = False + return cls(encoder=encoder, decoder=decoder, config=config) + + @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, VisionEncoderDecoderModel + >>> import requests + >>> from PIL import Image + >>> import torch + + >>> processor = AutoProcessor.from_pretrained("microsoft/trocr-base-handwritten") + >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") + + >>> # load image from the IAM dataset + >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") + + >>> # training + >>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id + >>> model.config.pad_token_id = processor.tokenizer.pad_token_id + >>> model.config.vocab_size = model.config.decoder.vocab_size + + >>> pixel_values = processor(image, return_tensors="pt").pixel_values + >>> text = "hello world" + >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids + >>> outputs = model(pixel_values=pixel_values, labels=labels) + >>> loss = outputs.loss + + >>> # inference (generation) + >>> generated_ids = model.generate(pixel_values) + >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + if encoder_outputs is None: + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + encoder_outputs = self.encoder( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs_encoder, + ) + elif isinstance(encoder_outputs, tuple): + encoder_outputs = 
BaseModelOutput(*encoder_outputs) + + encoder_hidden_states = encoder_outputs[0] + + # optionally project encoder_hidden_states + if ( + self.encoder.config.hidden_size != self.decoder.config.hidden_size + and self.decoder.config.cross_attention_hidden_size is None + ): + encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) + + # else: + encoder_attention_mask = None + + if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + # Decode + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + use_cache=use_cache, + past_key_values=past_key_values, + return_dict=return_dict, + **kwargs_decoder, + ) + + # Compute loss independent from decoder (as some shift the logits inside them) + loss = None + if labels is not None: + logits = decoder_outputs.logits if return_dict else decoder_outputs[0] + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1)) + + if not return_dict: + if loss is not None: + return (loss,) + decoder_outputs + encoder_outputs + else: + return decoder_outputs + encoder_outputs + + return Seq2SeqLMOutput( + loss=loss, + logits=decoder_outputs.logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs + ): + decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) + decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None + input_dict = { + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "decoder_input_ids": decoder_inputs["input_ids"], + "encoder_outputs": encoder_outputs, + "past_key_values": decoder_inputs["past_key_values"], + "use_cache": use_cache, + } + return input_dict + + def resize_token_embeddings(self, *args, **kwargs): + raise NotImplementedError( + "Resizing the embedding layers via the VisionEncoderDecoderModel directly is not supported.Please use the" + " respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))" + ) + + def _reorder_cache(self, past_key_values, beam_idx): + # apply decoder cache reordering here + return self.decoder._reorder_cache(past_key_values, beam_idx) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vits/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79c18048e7c776dbee992b249cd53098f544daaa --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/vits/__init__.py @@ -0,0 +1,67 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_sentencepiece_available, + is_speech_available, + is_torch_available, +) + + +_import_structure = { + "configuration_vits": [ + "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "VitsConfig", + ], + "tokenization_vits": ["VitsTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_vits"] = [ + "VITS_PRETRAINED_MODEL_ARCHIVE_LIST", + "VitsModel", + "VitsPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_vits import ( + VITS_PRETRAINED_CONFIG_ARCHIVE_MAP, + VitsConfig, + ) + from .tokenization_vits import VitsTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_vits import ( + VITS_PRETRAINED_MODEL_ARCHIVE_LIST, + VitsModel, + VitsPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfefc34a524115faf663d8fe464632086ae4daab Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/configuration_vits.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/configuration_vits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54f05bf2bbea287fd19534664b21a840489a8933 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/configuration_vits.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/convert_original_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/convert_original_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db0300201b4ef2d671e0ceefa8d9a3ac1ca191d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/convert_original_checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/modeling_vits.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/modeling_vits.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d6bcec33732367fafbc83efea38907131ca2c28f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/modeling_vits.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/tokenization_vits.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/tokenization_vits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01c2301489be1b3f9f40ec5a3bc1c321e4698974 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vits/__pycache__/tokenization_vits.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/configuration_vits.py b/venv/lib/python3.10/site-packages/transformers/models/vits/configuration_vits.py new file mode 100644 index 0000000000000000000000000000000000000000..5538e53d4be1b84b70ff18e75df851b17c9edcb7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vits/configuration_vits.py @@ -0,0 +1,254 @@ +# coding=utf-8 +# Copyright 2023 The Kakao Enterprise Authors and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" VITS model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import VITS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class VitsConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`VitsModel`]. It is used to instantiate a VITS + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the VITS + [facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 38): + Vocabulary size of the VITS model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed to the forward method of [`VitsModel`]. + hidden_size (`int`, *optional*, defaults to 192): + Dimensionality of the text encoder layers. + num_hidden_layers (`int`, *optional*, defaults to 6): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 2): + Number of attention heads for each attention layer in the Transformer encoder. + window_size (`int`, *optional*, defaults to 4): + Window size for the relative positional embeddings in the attention layers of the Transformer encoder. + use_bias (`bool`, *optional*, defaults to `True`): + Whether to use bias in the key, query, value projection layers in the Transformer encoder. 
+ ffn_dim (`int`, *optional*, defaults to 768): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + layerdrop (`float`, *optional*, defaults to 0.1): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + ffn_kernel_size (`int`, *optional*, defaults to 3): + Kernel size of the 1D convolution layers used by the feed-forward network in the Transformer encoder. + flow_size (`int`, *optional*, defaults to 192): + Dimensionality of the flow layers. + spectrogram_bins (`int`, *optional*, defaults to 513): + Number of frequency bins in the target spectrogram. + hidden_act (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings and encoder. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for activations inside the fully connected layer. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + use_stochastic_duration_prediction (`bool`, *optional*, defaults to `True`): + Whether to use the stochastic duration prediction module or the regular duration predictor. + num_speakers (`int`, *optional*, defaults to 1): + Number of speakers if this is a multi-speaker model. + speaker_embedding_size (`int`, *optional*, defaults to 0): + Number of channels used by the speaker embeddings. Is zero for single-speaker models. + upsample_initial_channel (`int`, *optional*, defaults to 512): + The number of input channels into the HiFi-GAN upsampling network. + upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`): + A tuple of integers defining the stride of each 1D convolutional layer in the HiFi-GAN upsampling network. + The length of `upsample_rates` defines the number of convolutional layers and has to match the length of + `upsample_kernel_sizes`. + upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`): + A tuple of integers defining the kernel size of each 1D convolutional layer in the HiFi-GAN upsampling + network. The length of `upsample_kernel_sizes` defines the number of convolutional layers and has to match + the length of `upsample_rates`. + resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`): + A tuple of integers defining the kernel sizes of the 1D convolutional layers in the HiFi-GAN + multi-receptive field fusion (MRF) module. + resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): + A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the + HiFi-GAN multi-receptive field fusion (MRF) module. + leaky_relu_slope (`float`, *optional*, defaults to 0.1): + The angle of the negative slope used by the leaky ReLU activation. 
+ depth_separable_channels (`int`, *optional*, defaults to 2): + Number of channels to use in each depth-separable block. + depth_separable_num_layers (`int`, *optional*, defaults to 3): + Number of convolutional layers to use in each depth-separable block. + duration_predictor_flow_bins (`int`, *optional*, defaults to 10): + Number of channels to map using the unconstrained rational spline in the duration predictor model. + duration_predictor_tail_bound (`float`, *optional*, defaults to 5.0): + Value of the tail bin boundary when computing the unconstrained rational spline in the duration predictor + model. + duration_predictor_kernel_size (`int`, *optional*, defaults to 3): + Kernel size of the 1D convolution layers used in the duration predictor model. + duration_predictor_dropout (`float`, *optional*, defaults to 0.5): + The dropout ratio for the duration predictor model. + duration_predictor_num_flows (`int`, *optional*, defaults to 4): + Number of flow stages used by the duration predictor model. + duration_predictor_filter_channels (`int`, *optional*, defaults to 256): + Number of channels for the convolution layers used in the duration predictor model. + prior_encoder_num_flows (`int`, *optional*, defaults to 4): + Number of flow stages used by the prior encoder flow model. + prior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 4): + Number of WaveNet layers used by the prior encoder flow model. + posterior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 16): + Number of WaveNet layers used by the posterior encoder model. + wavenet_kernel_size (`int`, *optional*, defaults to 5): + Kernel size of the 1D convolution layers used in the WaveNet model. + wavenet_dilation_rate (`int`, *optional*, defaults to 1): + Dilation rate of the dilated 1D convolutional layers used in the WaveNet model. + wavenet_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the WaveNet layers. + speaking_rate (`float`, *optional*, defaults to 1.0): + Speaking rate. Larger values give faster synthesised speech. + noise_scale (`float`, *optional*, defaults to 0.667): + How random the speech prediction is. Larger values create more variation in the predicted speech. + noise_scale_duration (`float`, *optional*, defaults to 0.8): + How random the duration prediction is. Larger values create more variation in the predicted durations. + sampling_rate (`int`, *optional*, defaults to 16000): + The sampling rate at which the output audio waveform is digitized, expressed in hertz (Hz). 
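Note that `upsample_rates` and `upsample_kernel_sizes` are consumed pairwise by the HiFi-GAN decoder (one transposed convolution per pair), so the two lists must have the same length; the constructor rejects a mismatch. A minimal sketch of that behaviour (the values shown are just the defaults, passed explicitly for illustration):

```python
from transformers import VitsConfig

# One upsampling stage per (stride, kernel_size) pair.
config = VitsConfig(upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4])

# Mismatched lengths are rejected when the config is instantiated.
try:
    VitsConfig(upsample_rates=[8, 8, 2], upsample_kernel_sizes=[16, 16, 4, 4])
except ValueError as err:
    print(err)
```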
+ + Example: + + ```python + >>> from transformers import VitsModel, VitsConfig + + >>> # Initializing a "facebook/mms-tts-eng" style configuration + >>> configuration = VitsConfig() + + >>> # Initializing a model (with random weights) from the "facebook/mms-tts-eng" style configuration + >>> model = VitsModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "vits" + + def __init__( + self, + vocab_size=38, + hidden_size=192, + num_hidden_layers=6, + num_attention_heads=2, + window_size=4, + use_bias=True, + ffn_dim=768, + layerdrop=0.1, + ffn_kernel_size=3, + flow_size=192, + spectrogram_bins=513, + hidden_act="relu", + hidden_dropout=0.1, + attention_dropout=0.1, + activation_dropout=0.1, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_stochastic_duration_prediction=True, + num_speakers=1, + speaker_embedding_size=0, + upsample_initial_channel=512, + upsample_rates=[8, 8, 2, 2], + upsample_kernel_sizes=[16, 16, 4, 4], + resblock_kernel_sizes=[3, 7, 11], + resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], + leaky_relu_slope=0.1, + depth_separable_channels=2, + depth_separable_num_layers=3, + duration_predictor_flow_bins=10, + duration_predictor_tail_bound=5.0, + duration_predictor_kernel_size=3, + duration_predictor_dropout=0.5, + duration_predictor_num_flows=4, + duration_predictor_filter_channels=256, + prior_encoder_num_flows=4, + prior_encoder_num_wavenet_layers=4, + posterior_encoder_num_wavenet_layers=16, + wavenet_kernel_size=5, + wavenet_dilation_rate=1, + wavenet_dropout=0.0, + speaking_rate=1.0, + noise_scale=0.667, + noise_scale_duration=0.8, + sampling_rate=16_000, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.window_size = window_size + self.use_bias = use_bias + self.ffn_dim = ffn_dim + self.layerdrop = layerdrop + self.ffn_kernel_size = ffn_kernel_size + self.flow_size = flow_size + self.spectrogram_bins = spectrogram_bins + self.hidden_act = hidden_act + self.hidden_dropout = hidden_dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.use_stochastic_duration_prediction = use_stochastic_duration_prediction + self.num_speakers = num_speakers + self.speaker_embedding_size = speaker_embedding_size + self.upsample_initial_channel = upsample_initial_channel + self.upsample_rates = upsample_rates + self.upsample_kernel_sizes = upsample_kernel_sizes + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.leaky_relu_slope = leaky_relu_slope + self.depth_separable_channels = depth_separable_channels + self.depth_separable_num_layers = depth_separable_num_layers + self.duration_predictor_flow_bins = duration_predictor_flow_bins + self.duration_predictor_tail_bound = duration_predictor_tail_bound + self.duration_predictor_kernel_size = duration_predictor_kernel_size + self.duration_predictor_dropout = duration_predictor_dropout + self.duration_predictor_num_flows = duration_predictor_num_flows + self.duration_predictor_filter_channels = duration_predictor_filter_channels + self.prior_encoder_num_flows = prior_encoder_num_flows + self.prior_encoder_num_wavenet_layers = prior_encoder_num_wavenet_layers + self.posterior_encoder_num_wavenet_layers = 
posterior_encoder_num_wavenet_layers + self.wavenet_kernel_size = wavenet_kernel_size + self.wavenet_dilation_rate = wavenet_dilation_rate + self.wavenet_dropout = wavenet_dropout + self.speaking_rate = speaking_rate + self.noise_scale = noise_scale + self.noise_scale_duration = noise_scale_duration + self.sampling_rate = sampling_rate + + if len(upsample_kernel_sizes) != len(upsample_rates): + raise ValueError( + f"The length of `upsample_kernel_sizes` ({len(upsample_kernel_sizes)}) must match the length of " + f"`upsample_rates` ({len(upsample_rates)})" + ) + + super().__init__(**kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/convert_original_checkpoint.py b/venv/lib/python3.10/site-packages/transformers/models/vits/convert_original_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..267f72ccd08fc26f7bdd1a56747a1dbc8d697cb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vits/convert_original_checkpoint.py @@ -0,0 +1,390 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert VITS checkpoint.""" + +import argparse +import json +import tempfile + +import torch +from huggingface_hub import hf_hub_download + +from transformers import VitsConfig, VitsModel, VitsTokenizer, logging + + +logging.set_verbosity_info() +logger = logging.get_logger("transformers.models.vits") + +MAPPING_TEXT_ENCODER = { + "enc_p.emb": "text_encoder.embed_tokens", + "enc_p.encoder.attn_layers.*.conv_k": "text_encoder.encoder.layers.*.attention.k_proj", + "enc_p.encoder.attn_layers.*.conv_v": "text_encoder.encoder.layers.*.attention.v_proj", + "enc_p.encoder.attn_layers.*.conv_q": "text_encoder.encoder.layers.*.attention.q_proj", + "enc_p.encoder.attn_layers.*.conv_o": "text_encoder.encoder.layers.*.attention.out_proj", + "enc_p.encoder.attn_layers.*.emb_rel_k": "text_encoder.encoder.layers.*.attention.emb_rel_k", + "enc_p.encoder.attn_layers.*.emb_rel_v": "text_encoder.encoder.layers.*.attention.emb_rel_v", + "enc_p.encoder.norm_layers_1.*.gamma": "text_encoder.encoder.layers.*.layer_norm.weight", + "enc_p.encoder.norm_layers_1.*.beta": "text_encoder.encoder.layers.*.layer_norm.bias", + "enc_p.encoder.ffn_layers.*.conv_1": "text_encoder.encoder.layers.*.feed_forward.conv_1", + "enc_p.encoder.ffn_layers.*.conv_2": "text_encoder.encoder.layers.*.feed_forward.conv_2", + "enc_p.encoder.norm_layers_2.*.gamma": "text_encoder.encoder.layers.*.final_layer_norm.weight", + "enc_p.encoder.norm_layers_2.*.beta": "text_encoder.encoder.layers.*.final_layer_norm.bias", + "enc_p.proj": "text_encoder.project", +} +MAPPING_STOCHASTIC_DURATION_PREDICTOR = { + "dp.pre": "duration_predictor.conv_pre", + "dp.proj": "duration_predictor.conv_proj", + "dp.convs.convs_sep.*": "duration_predictor.conv_dds.convs_dilated.*", + "dp.convs.convs_1x1.*": "duration_predictor.conv_dds.convs_pointwise.*", + "dp.convs.norms_1.*.gamma": 
"duration_predictor.conv_dds.norms_1.*.weight", + "dp.convs.norms_1.*.beta": "duration_predictor.conv_dds.norms_1.*.bias", + "dp.convs.norms_2.*.gamma": "duration_predictor.conv_dds.norms_2.*.weight", + "dp.convs.norms_2.*.beta": "duration_predictor.conv_dds.norms_2.*.bias", + "dp.flows.0.logs": "duration_predictor.flows.0.log_scale", + "dp.flows.0.m": "duration_predictor.flows.0.translate", + "dp.flows.*.pre": "duration_predictor.flows.*.conv_pre", + "dp.flows.*.proj": "duration_predictor.flows.*.conv_proj", + "dp.flows.*.convs.convs_1x1.0": "duration_predictor.flows.*.conv_dds.convs_pointwise.0", + "dp.flows.*.convs.convs_1x1.1": "duration_predictor.flows.*.conv_dds.convs_pointwise.1", + "dp.flows.*.convs.convs_1x1.2": "duration_predictor.flows.*.conv_dds.convs_pointwise.2", + "dp.flows.*.convs.convs_sep.0": "duration_predictor.flows.*.conv_dds.convs_dilated.0", + "dp.flows.*.convs.convs_sep.1": "duration_predictor.flows.*.conv_dds.convs_dilated.1", + "dp.flows.*.convs.convs_sep.2": "duration_predictor.flows.*.conv_dds.convs_dilated.2", + "dp.flows.*.convs.norms_1.0.gamma": "duration_predictor.flows.*.conv_dds.norms_1.0.weight", + "dp.flows.*.convs.norms_1.0.beta": "duration_predictor.flows.*.conv_dds.norms_1.0.bias", + "dp.flows.*.convs.norms_1.1.gamma": "duration_predictor.flows.*.conv_dds.norms_1.1.weight", + "dp.flows.*.convs.norms_1.1.beta": "duration_predictor.flows.*.conv_dds.norms_1.1.bias", + "dp.flows.*.convs.norms_1.2.gamma": "duration_predictor.flows.*.conv_dds.norms_1.2.weight", + "dp.flows.*.convs.norms_1.2.beta": "duration_predictor.flows.*.conv_dds.norms_1.2.bias", + "dp.flows.*.convs.norms_2.0.gamma": "duration_predictor.flows.*.conv_dds.norms_2.0.weight", + "dp.flows.*.convs.norms_2.0.beta": "duration_predictor.flows.*.conv_dds.norms_2.0.bias", + "dp.flows.*.convs.norms_2.1.gamma": "duration_predictor.flows.*.conv_dds.norms_2.1.weight", + "dp.flows.*.convs.norms_2.1.beta": "duration_predictor.flows.*.conv_dds.norms_2.1.bias", + "dp.flows.*.convs.norms_2.2.gamma": "duration_predictor.flows.*.conv_dds.norms_2.2.weight", + "dp.flows.*.convs.norms_2.2.beta": "duration_predictor.flows.*.conv_dds.norms_2.2.bias", + "dp.post_pre": "duration_predictor.post_conv_pre", + "dp.post_proj": "duration_predictor.post_conv_proj", + "dp.post_convs.convs_sep.*": "duration_predictor.post_conv_dds.convs_dilated.*", + "dp.post_convs.convs_1x1.*": "duration_predictor.post_conv_dds.convs_pointwise.*", + "dp.post_convs.norms_1.*.gamma": "duration_predictor.post_conv_dds.norms_1.*.weight", + "dp.post_convs.norms_1.*.beta": "duration_predictor.post_conv_dds.norms_1.*.bias", + "dp.post_convs.norms_2.*.gamma": "duration_predictor.post_conv_dds.norms_2.*.weight", + "dp.post_convs.norms_2.*.beta": "duration_predictor.post_conv_dds.norms_2.*.bias", + "dp.post_flows.0.logs": "duration_predictor.post_flows.0.log_scale", + "dp.post_flows.0.m": "duration_predictor.post_flows.0.translate", + "dp.post_flows.*.pre": "duration_predictor.post_flows.*.conv_pre", + "dp.post_flows.*.proj": "duration_predictor.post_flows.*.conv_proj", + "dp.post_flows.*.convs.convs_1x1.0": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.0", + "dp.post_flows.*.convs.convs_1x1.1": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.1", + "dp.post_flows.*.convs.convs_1x1.2": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.2", + "dp.post_flows.*.convs.convs_sep.0": "duration_predictor.post_flows.*.conv_dds.convs_dilated.0", + "dp.post_flows.*.convs.convs_sep.1": 
"duration_predictor.post_flows.*.conv_dds.convs_dilated.1", + "dp.post_flows.*.convs.convs_sep.2": "duration_predictor.post_flows.*.conv_dds.convs_dilated.2", + "dp.post_flows.*.convs.norms_1.0.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.0.weight", + "dp.post_flows.*.convs.norms_1.0.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.0.bias", + "dp.post_flows.*.convs.norms_1.1.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.1.weight", + "dp.post_flows.*.convs.norms_1.1.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.1.bias", + "dp.post_flows.*.convs.norms_1.2.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.2.weight", + "dp.post_flows.*.convs.norms_1.2.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.2.bias", + "dp.post_flows.*.convs.norms_2.0.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.0.weight", + "dp.post_flows.*.convs.norms_2.0.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.0.bias", + "dp.post_flows.*.convs.norms_2.1.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.1.weight", + "dp.post_flows.*.convs.norms_2.1.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.1.bias", + "dp.post_flows.*.convs.norms_2.2.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.2.weight", + "dp.post_flows.*.convs.norms_2.2.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.2.bias", + "dp.cond": "duration_predictor.cond", # num_speakers > 1 +} +MAPPING_FLOW = { + "flow.flows.*.pre": "flow.flows.*.conv_pre", + "flow.flows.*.enc.in_layers.0": "flow.flows.*.wavenet.in_layers.0", + "flow.flows.*.enc.in_layers.1": "flow.flows.*.wavenet.in_layers.1", + "flow.flows.*.enc.in_layers.2": "flow.flows.*.wavenet.in_layers.2", + "flow.flows.*.enc.in_layers.3": "flow.flows.*.wavenet.in_layers.3", + "flow.flows.*.enc.res_skip_layers.0": "flow.flows.*.wavenet.res_skip_layers.0", + "flow.flows.*.enc.res_skip_layers.1": "flow.flows.*.wavenet.res_skip_layers.1", + "flow.flows.*.enc.res_skip_layers.2": "flow.flows.*.wavenet.res_skip_layers.2", + "flow.flows.*.enc.res_skip_layers.3": "flow.flows.*.wavenet.res_skip_layers.3", + "flow.flows.*.enc.cond_layer": "flow.flows.*.wavenet.cond_layer", # num_speakers > 1 + "flow.flows.*.post": "flow.flows.*.conv_post", +} +MAPPING_GENERATOR = { + "dec.conv_pre": "decoder.conv_pre", + "dec.ups.0": "decoder.upsampler.0", + "dec.ups.1": "decoder.upsampler.1", + "dec.ups.2": "decoder.upsampler.2", + "dec.ups.3": "decoder.upsampler.3", + "dec.resblocks.*.convs1.0": "decoder.resblocks.*.convs1.0", + "dec.resblocks.*.convs1.1": "decoder.resblocks.*.convs1.1", + "dec.resblocks.*.convs1.2": "decoder.resblocks.*.convs1.2", + "dec.resblocks.*.convs2.0": "decoder.resblocks.*.convs2.0", + "dec.resblocks.*.convs2.1": "decoder.resblocks.*.convs2.1", + "dec.resblocks.*.convs2.2": "decoder.resblocks.*.convs2.2", + "dec.conv_post": "decoder.conv_post", + "dec.cond": "decoder.cond", # num_speakers > 1 +} +MAPPING_POSTERIOR_ENCODER = { + "enc_q.pre": "posterior_encoder.conv_pre", + "enc_q.enc.in_layers.*": "posterior_encoder.wavenet.in_layers.*", + "enc_q.enc.res_skip_layers.*": "posterior_encoder.wavenet.res_skip_layers.*", + "enc_q.enc.cond_layer": "posterior_encoder.wavenet.cond_layer", # num_speakers > 1 + "enc_q.proj": "posterior_encoder.conv_proj", +} +MAPPING = { + **MAPPING_TEXT_ENCODER, + **MAPPING_STOCHASTIC_DURATION_PREDICTOR, + **MAPPING_FLOW, + **MAPPING_GENERATOR, + **MAPPING_POSTERIOR_ENCODER, + "emb_g": "embed_speaker", # num_speakers > 1 +} +TOP_LEVEL_KEYS = [] +IGNORE_KEYS = [] + + +def 
set_recursively(hf_pointer, key, value, full_name, weight_type): + for attribute in key.split("."): + hf_pointer = getattr(hf_pointer, attribute) + + if weight_type is not None: + hf_shape = getattr(hf_pointer, weight_type).shape + else: + hf_shape = hf_pointer.shape + + # strip off the kernel dimension at the end (original weights are Conv1d) + if key.endswith(".k_proj") or key.endswith(".v_proj") or key.endswith(".q_proj") or key.endswith(".out_proj"): + value = value.squeeze(-1) + + if hf_shape != value.shape: + raise ValueError( + f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" + f" {value.shape} for {full_name}" + ) + + if weight_type == "weight": + hf_pointer.weight.data = value + elif weight_type == "weight_g": + hf_pointer.weight_g.data = value + elif weight_type == "weight_v": + hf_pointer.weight_v.data = value + elif weight_type == "bias": + hf_pointer.bias.data = value + elif weight_type == "running_mean": + hf_pointer.running_mean.data = value + elif weight_type == "running_var": + hf_pointer.running_var.data = value + elif weight_type == "num_batches_tracked": + hf_pointer.num_batches_tracked.data = value + else: + hf_pointer.data = value + + logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.") + + +def should_ignore(name, ignore_keys): + for key in ignore_keys: + if key.endswith(".*"): + if name.startswith(key[:-1]): + return True + elif ".*." in key: + prefix, suffix = key.split(".*.") + if prefix in name and suffix in name: + return True + elif key in name: + return True + return False + + +def recursively_load_weights(fairseq_dict, hf_model): + unused_weights = [] + + for name, value in fairseq_dict.items(): + if should_ignore(name, IGNORE_KEYS): + logger.info(f"{name} was ignored") + continue + + is_used = False + for key, mapped_key in MAPPING.items(): + if key.endswith(".*"): + key = key[:-1] + elif "*" in key: + prefix, suffix = key.split(".*.") + if prefix in name and suffix in name: + key = suffix + + if key in name: + is_used = True + if mapped_key.endswith(".*"): + layer_index = name.split(key)[-1].split(".")[0] + mapped_key = mapped_key.replace("*", layer_index) + elif "*" in mapped_key: + layer_index = name.split(key)[0].split(".")[-2] + + # remap the layer index since we removed the Flip layers + if "flow.flows" in mapped_key: + layer_index = str(int(layer_index) // 2) + if "duration_predictor.flows" in mapped_key or "duration_predictor.post_flows" in mapped_key: + layer_index = str(int(layer_index) // 2 + 1) + + mapped_key = mapped_key.replace("*", layer_index) + if "weight_g" in name: + weight_type = "weight_g" + elif "weight_v" in name: + weight_type = "weight_v" + elif "bias" in name: + weight_type = "bias" + elif "weight" in name: + weight_type = "weight" + elif "running_mean" in name: + weight_type = "running_mean" + elif "running_var" in name: + weight_type = "running_var" + elif "num_batches_tracked" in name: + weight_type = "num_batches_tracked" + else: + weight_type = None + set_recursively(hf_model, mapped_key, value, name, weight_type) + continue + if not is_used: + unused_weights.append(name) + + logger.warning(f"Unused weights: {unused_weights}") + + +@torch.no_grad() +def convert_checkpoint( + pytorch_dump_folder_path, + checkpoint_path=None, + config_path=None, + vocab_path=None, + language=None, + num_speakers=None, + sampling_rate=None, + repo_id=None, +): + """ + Copy/paste/tweak model's weights to transformers design. 
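For orientation, a typical invocation might look like the sketch below; the `eng` language code and the output folder are illustrative, not prescribed by the script. When `--checkpoint_path` is omitted, the checkpoint, config and vocab are downloaded from the `facebook/mms-tts` repository for the given language.

```python
# CLI form (hypothetical paths; run from the directory containing this script):
#   python convert_original_checkpoint.py --language eng --pytorch_dump_folder_path ./vits-mms-eng

# Roughly equivalent programmatic call:
from convert_original_checkpoint import convert_checkpoint

convert_checkpoint(pytorch_dump_folder_path="./vits-mms-eng", language="eng")
```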
+ """ + if config_path is not None: + config = VitsConfig.from_pretrained(config_path) + else: + config = VitsConfig() + + if num_speakers: + config.num_speakers = num_speakers + config.speaker_embedding_size = 256 + + if sampling_rate: + config.sampling_rate = sampling_rate + + if checkpoint_path is None: + logger.info(f"***Converting model: facebook/mms-tts {language}***") + + vocab_path = hf_hub_download( + repo_id="facebook/mms-tts", + filename="vocab.txt", + subfolder=f"models/{language}", + ) + config_file = hf_hub_download( + repo_id="facebook/mms-tts", + filename="config.json", + subfolder=f"models/{language}", + ) + checkpoint_path = hf_hub_download( + repo_id="facebook/mms-tts", + filename="G_100000.pth", + subfolder=f"models/{language}", + ) + + with open(config_file, "r") as f: + data = f.read() + hps = json.loads(data) + + is_uroman = hps["data"]["training_files"].split(".")[-1] == "uroman" + if is_uroman: + logger.warning("For this checkpoint, you should use `uroman` to convert input text before tokenizing it!") + else: + logger.info(f"***Converting model: {checkpoint_path}***") + is_uroman = False + + # original VITS checkpoint + if vocab_path is None: + _pad = "_" + _punctuation = ';:,.!?¡¿—…"«»“” ' + _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" + symbols = _pad + _punctuation + _letters + _letters_ipa + symbol_to_id = {s: i for i, s in enumerate(symbols)} + phonemize = True + else: + # Save vocab as temporary json file + symbols = [line.replace("\n", "") for line in open(vocab_path, encoding="utf-8").readlines()] + symbol_to_id = {s: i for i, s in enumerate(symbols)} + # MMS-TTS does not use a token, so we set to the token used to space characters + _pad = symbols[0] + phonemize = False + + with tempfile.NamedTemporaryFile() as tf: + with open(tf.name, "w", encoding="utf-8") as f: + f.write(json.dumps(symbol_to_id, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + tokenizer = VitsTokenizer(tf.name, language=language, phonemize=phonemize, is_uroman=is_uroman, pad_token=_pad) + + config.vocab_size = len(symbols) + model = VitsModel(config) + + model.decoder.apply_weight_norm() + + orig_checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) + recursively_load_weights(orig_checkpoint["model"], model) + + model.decoder.remove_weight_norm() + + model.save_pretrained(pytorch_dump_folder_path) + tokenizer.save_pretrained(pytorch_dump_folder_path) + + if repo_id: + print("Pushing to the hub...") + tokenizer.push_to_hub(repo_id) + model.push_to_hub(repo_id) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--checkpoint_path", default=None, type=str, help="Local path to original checkpoint") + parser.add_argument("--vocab_path", default=None, type=str, help="Path to vocab.txt") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + parser.add_argument("--language", default=None, type=str, help="Tokenizer language (three-letter code)") + parser.add_argument("--num_speakers", default=None, type=int, help="Number of speakers") + parser.add_argument( + "--sampling_rate", default=None, type=int, help="Sampling rate on which the model was trained." + ) + parser.add_argument( + "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." 
+ ) + parser.add_argument( + "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." + ) + + args = parser.parse_args() + convert_checkpoint( + args.pytorch_dump_folder_path, + args.checkpoint_path, + args.config_path, + args.vocab_path, + args.language, + args.num_speakers, + args.sampling_rate, + args.push_to_hub, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/modeling_vits.py b/venv/lib/python3.10/site-packages/transformers/models/vits/modeling_vits.py new file mode 100644 index 0000000000000000000000000000000000000000..df8cf9350b3128dd067eb4989c8600503359a064 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vits/modeling_vits.py @@ -0,0 +1,1483 @@ +# coding=utf-8 +# Copyright 2023 The Kakao Enterprise Authors and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch VITS model.""" + +import math +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import numpy as np +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...integrations.deepspeed import is_deepspeed_zero3_enabled +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask +from ...modeling_outputs import ( + BaseModelOutput, + ModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_vits import VitsConfig + + +logger = logging.get_logger(__name__) + + +# General docstring +_CONFIG_FOR_DOC = "VitsConfig" + + +from ..deprecated._archive_maps import VITS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +class VitsModelOutput(ModelOutput): + """ + Describes the outputs for the VITS model, with potential hidden states and attentions. + + Args: + waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + The final audio waveform predicted by the model. + sequence_lengths (`torch.FloatTensor` of shape `(batch_size,)`): + The length in samples of each element in the `waveform` batch. + spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): + The log-mel spectrogram predicted at the output of the flow model. This spectrogram is passed to the Hi-Fi + GAN decoder model to obtain the final audio waveform. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attention weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + waveform: torch.FloatTensor = None + sequence_lengths: torch.FloatTensor = None + spectrogram: Optional[Tuple[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class VitsTextEncoderOutput(ModelOutput): + """ + Describes the outputs for the VITS text encoder model, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + prior_means (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The predicted mean values of the prior distribution for the latent text variables. + prior_log_variances (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The predicted log-variance values of the prior distribution for the latent text variables. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attention weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + prior_means: torch.FloatTensor = None + prior_log_variances: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, num_channels): + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :num_channels, :]) + s_act = torch.sigmoid(in_act[:, num_channels:, :]) + acts = t_act * s_act + return acts + + +def _unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + reverse=False, + tail_bound=5.0, + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3, +): + """ + This transformation represents a monotonically increasing piecewise rational quadratic function. Outside of the + `tail_bound`, the transform behaves as an identity function. + + Args: + inputs (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Second half of the hidden-states input to the Vits convolutional flow module. 
+ unnormalized_widths (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + First `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + unnormalized_heights (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + Second `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + unnormalized_derivatives (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + Third `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + reverse (`bool`, *optional*, defaults to `False`): + Whether the model is being run in reverse mode. + tail_bound (`float`, *optional* defaults to 5): + Upper and lower limit bound for the rational quadratic function. Outside of this `tail_bound`, the + transform behaves as an identity function. + min_bin_width (`float`, *optional*, defaults to 1e-3): + Minimum bin value across the width dimension for the piecewise rational quadratic function. + min_bin_height (`float`, *optional*, defaults to 1e-3): + Minimum bin value across the height dimension for the piecewise rational quadratic function. + min_derivative (`float`, *optional*, defaults to 1e-3): + Minimum bin value across the derivatives for the piecewise rational quadratic function. + Returns: + outputs (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Hidden-states as transformed by the piecewise rational quadratic function with the `tail_bound` limits + applied. + log_abs_det (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Logarithm of the absolute value of the determinants corresponding to the `outputs` with the `tail_bound` + limits applied. + """ + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + log_abs_det = torch.zeros_like(inputs) + constant = np.log(np.exp(1 - min_derivative) - 1) + + unnormalized_derivatives = nn.functional.pad(unnormalized_derivatives, pad=(1, 1)) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + log_abs_det[outside_interval_mask] = 0.0 + + outputs[inside_interval_mask], log_abs_det[inside_interval_mask] = _rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + reverse=reverse, + tail_bound=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + ) + return outputs, log_abs_det + + +def _rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + reverse, + tail_bound, + min_bin_width, + min_bin_height, + min_derivative, +): + """ + This transformation represents a monotonically increasing piecewise rational quadratic function. Unlike the + function `_unconstrained_rational_quadratic_spline`, the function behaves the same across the `tail_bound`. 
+ + Args: + inputs (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Second half of the hidden-states input to the Vits convolutional flow module. + unnormalized_widths (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + First `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + unnormalized_heights (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + Second `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + unnormalized_derivatives (`torch.FloatTensor` of shape `(batch_size, channels, seq_len, duration_predictor_flow_bins)`): + Third `duration_predictor_flow_bins` of the hidden-states from the output of the convolution projection + layer in the convolutional flow module + reverse (`bool`): + Whether the model is being run in reverse mode. + tail_bound (`float`): + Upper and lower limit bound for the rational quadratic function. Outside of this `tail_bound`, the + transform behaves as an identity function. + min_bin_width (`float`): + Minimum bin value across the width dimension for the piecewise rational quadratic function. + min_bin_height (`float`): + Minimum bin value across the height dimension for the piecewise rational quadratic function. + min_derivative (`float`): + Minimum bin value across the derivatives for the piecewise rational quadratic function. + Returns: + outputs (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Hidden-states as transformed by the piecewise rational quadratic function. + log_abs_det (`torch.FloatTensor` of shape `(batch_size, channels, seq_len)`: + Logarithm of the absolute value of the determinants corresponding to the `outputs`. 
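As a reading aid for the tensor code in the function body, the forward transform applied inside a single bin can be sketched as a scalar function. This is a simplified sketch, not part of the module; the names mirror the local variables below (`s` corresponds to `input_delta`, `d_k`/`d_k1` to the knot derivatives, `h_k` to `input_heights`, `y_k` to `input_cumheights`).

```python
def _rq_bin_forward(theta, y_k, h_k, s, d_k, d_k1):
    # theta in [0, 1] is the relative position inside the bin; the map is the
    # monotone rational-quadratic interpolant used by the spline flow.
    t = theta * (1 - theta)
    numerator = h_k * (s * theta**2 + d_k * t)
    denominator = s + (d_k + d_k1 - 2 * s) * t
    return y_k + numerator / denominator
```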
+ """ + upper_bound = tail_bound + lower_bound = -tail_bound + + if torch.min(inputs) < lower_bound or torch.max(inputs) > upper_bound: + raise ValueError("Input to a transform is not within its domain") + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError(f"Minimal bin width {min_bin_width} too large for the number of bins {num_bins}") + if min_bin_height * num_bins > 1.0: + raise ValueError(f"Minimal bin height {min_bin_height} too large for the number of bins {num_bins}") + + widths = nn.functional.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = nn.functional.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) + cumwidths = (upper_bound - lower_bound) * cumwidths + lower_bound + cumwidths[..., 0] = lower_bound + cumwidths[..., -1] = upper_bound + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + nn.functional.softplus(unnormalized_derivatives) + + heights = nn.functional.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = nn.functional.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) + cumheights = (upper_bound - lower_bound) * cumheights + lower_bound + cumheights[..., 0] = lower_bound + cumheights[..., -1] = upper_bound + heights = cumheights[..., 1:] - cumheights[..., :-1] + + bin_locations = cumheights if reverse else cumwidths + bin_locations[..., -1] += 1e-6 + bin_idx = torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + bin_idx = bin_idx[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + intermediate1 = input_derivatives + input_derivatives_plus_one - 2 * input_delta + if not reverse: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta) + denominator = input_delta + intermediate1 * theta_one_minus_theta + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2) + ) + log_abs_det = torch.log(derivative_numerator) - 2 * torch.log(denominator) + return outputs, log_abs_det + else: + # find the roots of a quadratic equation + intermediate2 = inputs - input_cumheights + intermediate3 = intermediate2 * intermediate1 + a = input_heights * (input_delta - input_derivatives) + intermediate3 + b = input_heights * input_derivatives - intermediate3 + c = -input_delta * intermediate2 + + discriminant = b.pow(2) - 4 * a * c + if not (discriminant >= 0).all(): + raise RuntimeError(f"invalid discriminant {discriminant}") + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + intermediate1 * 
theta_one_minus_theta + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2) + ) + log_abs_det = torch.log(derivative_numerator) - 2 * torch.log(denominator) + return outputs, -log_abs_det + + +class VitsWaveNet(torch.nn.Module): + def __init__(self, config: VitsConfig, num_layers: int): + super().__init__() + self.hidden_size = config.hidden_size + self.num_layers = num_layers + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.dropout = nn.Dropout(config.wavenet_dropout) + + if hasattr(nn.utils.parametrizations, "weight_norm"): + weight_norm = nn.utils.parametrizations.weight_norm + else: + weight_norm = nn.utils.weight_norm + + if config.speaker_embedding_size != 0: + cond_layer = torch.nn.Conv1d(config.speaker_embedding_size, 2 * config.hidden_size * num_layers, 1) + self.cond_layer = weight_norm(cond_layer, name="weight") + + for i in range(num_layers): + dilation = config.wavenet_dilation_rate**i + padding = (config.wavenet_kernel_size * dilation - dilation) // 2 + in_layer = torch.nn.Conv1d( + in_channels=config.hidden_size, + out_channels=2 * config.hidden_size, + kernel_size=config.wavenet_kernel_size, + dilation=dilation, + padding=padding, + ) + in_layer = weight_norm(in_layer, name="weight") + self.in_layers.append(in_layer) + + # last one is not necessary + if i < num_layers - 1: + res_skip_channels = 2 * config.hidden_size + else: + res_skip_channels = config.hidden_size + + res_skip_layer = torch.nn.Conv1d(config.hidden_size, res_skip_channels, 1) + res_skip_layer = weight_norm(res_skip_layer, name="weight") + self.res_skip_layers.append(res_skip_layer) + + def forward(self, inputs, padding_mask, global_conditioning=None): + outputs = torch.zeros_like(inputs) + num_channels_tensor = torch.IntTensor([self.hidden_size]) + + if global_conditioning is not None: + global_conditioning = self.cond_layer(global_conditioning) + + for i in range(self.num_layers): + hidden_states = self.in_layers[i](inputs) + + if global_conditioning is not None: + cond_offset = i * 2 * self.hidden_size + global_states = global_conditioning[:, cond_offset : cond_offset + 2 * self.hidden_size, :] + else: + global_states = torch.zeros_like(hidden_states) + + acts = fused_add_tanh_sigmoid_multiply(hidden_states, global_states, num_channels_tensor[0]) + acts = self.dropout(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.num_layers - 1: + res_acts = res_skip_acts[:, : self.hidden_size, :] + inputs = (inputs + res_acts) * padding_mask + outputs = outputs + res_skip_acts[:, self.hidden_size :, :] + else: + outputs = outputs + res_skip_acts + + return outputs * padding_mask + + def remove_weight_norm(self): + if self.speaker_embedding_size != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for layer in self.in_layers: + torch.nn.utils.remove_weight_norm(layer) + for layer in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(layer) + + +class VitsPosteriorEncoder(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.out_channels = config.flow_size + + self.conv_pre = nn.Conv1d(config.spectrogram_bins, config.hidden_size, 1) + self.wavenet = VitsWaveNet(config, num_layers=config.posterior_encoder_num_wavenet_layers) + self.conv_proj = nn.Conv1d(config.hidden_size, self.out_channels * 2, 1) + + def forward(self, inputs, padding_mask, global_conditioning=None): + inputs = 
self.conv_pre(inputs) * padding_mask + inputs = self.wavenet(inputs, padding_mask, global_conditioning) + stats = self.conv_proj(inputs) * padding_mask + mean, log_stddev = torch.split(stats, self.out_channels, dim=1) + sampled = (mean + torch.randn_like(mean) * torch.exp(log_stddev)) * padding_mask + return sampled, mean, log_stddev + + +# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock +class HifiGanResidualBlock(nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): + super().__init__() + self.leaky_relu_slope = leaky_relu_slope + + self.convs1 = nn.ModuleList( + [ + nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=dilation[i], + padding=self.get_padding(kernel_size, dilation[i]), + ) + for i in range(len(dilation)) + ] + ) + self.convs2 = nn.ModuleList( + [ + nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=self.get_padding(kernel_size, 1), + ) + for _ in range(len(dilation)) + ] + ) + + def get_padding(self, kernel_size, dilation=1): + return (kernel_size * dilation - dilation) // 2 + + def apply_weight_norm(self): + for layer in self.convs1: + nn.utils.weight_norm(layer) + for layer in self.convs2: + nn.utils.weight_norm(layer) + + def remove_weight_norm(self): + for layer in self.convs1: + nn.utils.remove_weight_norm(layer) + for layer in self.convs2: + nn.utils.remove_weight_norm(layer) + + def forward(self, hidden_states): + for conv1, conv2 in zip(self.convs1, self.convs2): + residual = hidden_states + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = conv1(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = conv2(hidden_states) + hidden_states = hidden_states + residual + return hidden_states + + +class VitsHifiGan(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.config = config + self.num_kernels = len(config.resblock_kernel_sizes) + self.num_upsamples = len(config.upsample_rates) + self.conv_pre = nn.Conv1d( + config.flow_size, + config.upsample_initial_channel, + kernel_size=7, + stride=1, + padding=3, + ) + + self.upsampler = nn.ModuleList() + for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): + self.upsampler.append( + nn.ConvTranspose1d( + config.upsample_initial_channel // (2**i), + config.upsample_initial_channel // (2 ** (i + 1)), + kernel_size=kernel_size, + stride=upsample_rate, + padding=(kernel_size - upsample_rate) // 2, + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.upsampler)): + channels = config.upsample_initial_channel // (2 ** (i + 1)) + for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): + self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) + + self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3, bias=False) + + if config.speaker_embedding_size != 0: + self.cond = nn.Conv1d(config.speaker_embedding_size, config.upsample_initial_channel, 1) + + def apply_weight_norm(self): + for layer in self.upsampler: + nn.utils.weight_norm(layer) + for layer in self.resblocks: + layer.apply_weight_norm() + + def remove_weight_norm(self): + for layer in self.upsampler: + nn.utils.remove_weight_norm(layer) + for layer in self.resblocks: + layer.remove_weight_norm() + + def forward( + self, spectrogram: 
torch.FloatTensor, global_conditioning: Optional[torch.FloatTensor] = None + ) -> torch.FloatTensor: + r""" + Converts a spectrogram into a speech waveform. + + Args: + spectrogram (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`): + Tensor containing the spectrograms. + global_conditioning (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_size, 1)`, *optional*): + Tensor containing speaker embeddings, for multispeaker models. + + Returns: + `torch.FloatTensor`: Tensor of shape shape `(batch_size, 1, num_frames)` containing the speech waveform. + """ + hidden_states = self.conv_pre(spectrogram) + + if global_conditioning is not None: + hidden_states = hidden_states + self.cond(global_conditioning) + + for i in range(self.num_upsamples): + hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope) + hidden_states = self.upsampler[i](hidden_states) + + res_state = self.resblocks[i * self.num_kernels](hidden_states) + for j in range(1, self.num_kernels): + res_state += self.resblocks[i * self.num_kernels + j](hidden_states) + hidden_states = res_state / self.num_kernels + + hidden_states = nn.functional.leaky_relu(hidden_states) + hidden_states = self.conv_post(hidden_states) + waveform = torch.tanh(hidden_states) + return waveform + + +class VitsResidualCouplingLayer(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.half_channels = config.flow_size // 2 + + self.conv_pre = nn.Conv1d(self.half_channels, config.hidden_size, 1) + self.wavenet = VitsWaveNet(config, num_layers=config.prior_encoder_num_wavenet_layers) + self.conv_post = nn.Conv1d(config.hidden_size, self.half_channels, 1) + + def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False): + first_half, second_half = torch.split(inputs, [self.half_channels] * 2, dim=1) + hidden_states = self.conv_pre(first_half) * padding_mask + hidden_states = self.wavenet(hidden_states, padding_mask, global_conditioning) + mean = self.conv_post(hidden_states) * padding_mask + log_stddev = torch.zeros_like(mean) + + if not reverse: + second_half = mean + second_half * torch.exp(log_stddev) * padding_mask + outputs = torch.cat([first_half, second_half], dim=1) + log_determinant = torch.sum(log_stddev, [1, 2]) + return outputs, log_determinant + else: + second_half = (second_half - mean) * torch.exp(-log_stddev) * padding_mask + outputs = torch.cat([first_half, second_half], dim=1) + return outputs, None + + +class VitsResidualCouplingBlock(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.flows = nn.ModuleList() + for _ in range(config.prior_encoder_num_flows): + self.flows.append(VitsResidualCouplingLayer(config)) + + def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False): + if not reverse: + for flow in self.flows: + inputs, _ = flow(inputs, padding_mask, global_conditioning) + inputs = torch.flip(inputs, [1]) + else: + for flow in reversed(self.flows): + inputs = torch.flip(inputs, [1]) + inputs, _ = flow(inputs, padding_mask, global_conditioning, reverse=True) + return inputs + + +class VitsDilatedDepthSeparableConv(nn.Module): + def __init__(self, config: VitsConfig, dropout_rate=0.0): + super().__init__() + kernel_size = config.duration_predictor_kernel_size + channels = config.hidden_size + self.num_layers = config.depth_separable_num_layers + + self.dropout = nn.Dropout(dropout_rate) + self.convs_dilated = nn.ModuleList() + self.convs_pointwise = 
nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(self.num_layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2 + self.convs_dilated.append( + nn.Conv1d( + in_channels=channels, + out_channels=channels, + kernel_size=kernel_size, + groups=channels, + dilation=dilation, + padding=padding, + ) + ) + self.convs_pointwise.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(nn.LayerNorm(channels)) + self.norms_2.append(nn.LayerNorm(channels)) + + def forward(self, inputs, padding_mask, global_conditioning=None): + if global_conditioning is not None: + inputs = inputs + global_conditioning + + for i in range(self.num_layers): + hidden_states = self.convs_dilated[i](inputs * padding_mask) + hidden_states = self.norms_1[i](hidden_states.transpose(1, -1)).transpose(1, -1) + hidden_states = nn.functional.gelu(hidden_states) + hidden_states = self.convs_pointwise[i](hidden_states) + hidden_states = self.norms_2[i](hidden_states.transpose(1, -1)).transpose(1, -1) + hidden_states = nn.functional.gelu(hidden_states) + hidden_states = self.dropout(hidden_states) + inputs = inputs + hidden_states + + return inputs * padding_mask + + +class VitsConvFlow(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.filter_channels = config.hidden_size + self.half_channels = config.depth_separable_channels // 2 + self.num_bins = config.duration_predictor_flow_bins + self.tail_bound = config.duration_predictor_tail_bound + + self.conv_pre = nn.Conv1d(self.half_channels, self.filter_channels, 1) + self.conv_dds = VitsDilatedDepthSeparableConv(config) + self.conv_proj = nn.Conv1d(self.filter_channels, self.half_channels * (self.num_bins * 3 - 1), 1) + + def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False): + first_half, second_half = torch.split(inputs, [self.half_channels] * 2, dim=1) + + hidden_states = self.conv_pre(first_half) + hidden_states = self.conv_dds(hidden_states, padding_mask, global_conditioning) + hidden_states = self.conv_proj(hidden_states) * padding_mask + + batch_size, channels, length = first_half.shape + hidden_states = hidden_states.reshape(batch_size, channels, -1, length).permute(0, 1, 3, 2) + + unnormalized_widths = hidden_states[..., : self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = hidden_states[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = hidden_states[..., 2 * self.num_bins :] + + second_half, log_abs_det = _unconstrained_rational_quadratic_spline( + second_half, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + reverse=reverse, + tail_bound=self.tail_bound, + ) + + outputs = torch.cat([first_half, second_half], dim=1) * padding_mask + if not reverse: + log_determinant = torch.sum(log_abs_det * padding_mask, [1, 2]) + return outputs, log_determinant + else: + return outputs, None + + +class VitsElementwiseAffine(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.channels = config.depth_separable_channels + self.translate = nn.Parameter(torch.zeros(self.channels, 1)) + self.log_scale = nn.Parameter(torch.zeros(self.channels, 1)) + + def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False): + if not reverse: + outputs = self.translate + torch.exp(self.log_scale) * inputs + outputs = outputs * padding_mask + log_determinant = torch.sum(self.log_scale * padding_mask, [1, 2]) + 
return outputs, log_determinant + else: + outputs = (inputs - self.translate) * torch.exp(-self.log_scale) * padding_mask + return outputs, None + + +class VitsStochasticDurationPredictor(nn.Module): + def __init__(self, config): + super().__init__() + embed_dim = config.speaker_embedding_size + filter_channels = config.hidden_size + + self.conv_pre = nn.Conv1d(filter_channels, filter_channels, 1) + self.conv_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.conv_dds = VitsDilatedDepthSeparableConv( + config, + dropout_rate=config.duration_predictor_dropout, + ) + + if embed_dim != 0: + self.cond = nn.Conv1d(embed_dim, filter_channels, 1) + + self.flows = nn.ModuleList() + self.flows.append(VitsElementwiseAffine(config)) + for _ in range(config.duration_predictor_num_flows): + self.flows.append(VitsConvFlow(config)) + + self.post_conv_pre = nn.Conv1d(1, filter_channels, 1) + self.post_conv_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_conv_dds = VitsDilatedDepthSeparableConv( + config, + dropout_rate=config.duration_predictor_dropout, + ) + + self.post_flows = nn.ModuleList() + self.post_flows.append(VitsElementwiseAffine(config)) + for _ in range(config.duration_predictor_num_flows): + self.post_flows.append(VitsConvFlow(config)) + + def forward(self, inputs, padding_mask, global_conditioning=None, durations=None, reverse=False, noise_scale=1.0): + inputs = torch.detach(inputs) + inputs = self.conv_pre(inputs) + + if global_conditioning is not None: + global_conditioning = torch.detach(global_conditioning) + inputs = inputs + self.cond(global_conditioning) + + inputs = self.conv_dds(inputs, padding_mask) + inputs = self.conv_proj(inputs) * padding_mask + + if not reverse: + hidden_states = self.post_conv_pre(durations) + hidden_states = self.post_conv_dds(hidden_states, padding_mask) + hidden_states = self.post_conv_proj(hidden_states) * padding_mask + + random_posterior = ( + torch.randn(durations.size(0), 2, durations.size(2)).to(device=inputs.device, dtype=inputs.dtype) + * padding_mask + ) + log_determinant_posterior_sum = 0 + latents_posterior = random_posterior + for flow in self.post_flows: + latents_posterior, log_determinant = flow( + latents_posterior, padding_mask, global_conditioning=inputs + hidden_states + ) + latents_posterior = torch.flip(latents_posterior, [1]) + log_determinant_posterior_sum += log_determinant + + first_half, second_half = torch.split(latents_posterior, [1, 1], dim=1) + + log_determinant_posterior_sum += torch.sum( + (nn.functional.logsigmoid(first_half) + nn.functional.logsigmoid(-first_half)) * padding_mask, [1, 2] + ) + logq = ( + torch.sum(-0.5 * (math.log(2 * math.pi) + (random_posterior**2)) * padding_mask, [1, 2]) + - log_determinant_posterior_sum + ) + + first_half = (durations - torch.sigmoid(first_half)) * padding_mask + first_half = torch.log(torch.clamp_min(first_half, 1e-5)) * padding_mask + log_determinant_sum = torch.sum(-first_half, [1, 2]) + + latents = torch.cat([first_half, second_half], dim=1) + for flow in self.flows: + latents, log_determinant = flow(latents, padding_mask, global_conditioning=inputs) + latents = torch.flip(latents, [1]) + log_determinant_sum += log_determinant + + nll = torch.sum(0.5 * (math.log(2 * math.pi) + (latents**2)) * padding_mask, [1, 2]) - log_determinant_sum + return nll + logq + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + + latents = ( + torch.randn(inputs.size(0), 2, inputs.size(2)).to(device=inputs.device, 
dtype=inputs.dtype) + * noise_scale + ) + for flow in flows: + latents = torch.flip(latents, [1]) + latents, _ = flow(latents, padding_mask, global_conditioning=inputs, reverse=True) + + log_duration, _ = torch.split(latents, [1, 1], dim=1) + return log_duration + + +class VitsDurationPredictor(nn.Module): + def __init__(self, config): + super().__init__() + kernel_size = config.duration_predictor_kernel_size + filter_channels = config.duration_predictor_filter_channels + + self.dropout = nn.Dropout(config.duration_predictor_dropout) + self.conv_1 = nn.Conv1d(config.hidden_size, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_1 = nn.LayerNorm(filter_channels, eps=config.layer_norm_eps) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_2 = nn.LayerNorm(filter_channels, eps=config.layer_norm_eps) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if config.speaker_embedding_size != 0: + self.cond = nn.Conv1d(config.speaker_embedding_size, config.hidden_size, 1) + + def forward(self, inputs, padding_mask, global_conditioning=None): + inputs = torch.detach(inputs) + + if global_conditioning is not None: + global_conditioning = torch.detach(global_conditioning) + inputs = inputs + self.cond(global_conditioning) + + inputs = self.conv_1(inputs * padding_mask) + inputs = torch.relu(inputs) + inputs = self.norm_1(inputs.transpose(1, -1)).transpose(1, -1) + inputs = self.dropout(inputs) + + inputs = self.conv_2(inputs * padding_mask) + inputs = torch.relu(inputs) + inputs = self.norm_2(inputs.transpose(1, -1)).transpose(1, -1) + inputs = self.dropout(inputs) + + inputs = self.proj(inputs * padding_mask) + return inputs * padding_mask + + +class VitsAttention(nn.Module): + """Multi-headed attention with relative positional representation.""" + + def __init__(self, config: VitsConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.dropout = config.attention_dropout + self.window_size = config.window_size + + self.head_dim = self.embed_dim // self.num_heads + self.scaling = self.head_dim**-0.5 + + if (self.head_dim * self.num_heads) != self.embed_dim: + raise ValueError( + f"hidden_size must be divisible by num_attention_heads (got `hidden_size`: {self.embed_dim}" + f" and `num_attention_heads`: {self.num_heads})." 
+ ) + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias) + + if self.window_size: + self.emb_rel_k = nn.Parameter(torch.randn(1, self.window_size * 2 + 1, self.head_dim) * self.scaling) + self.emb_rel_v = nn.Parameter(torch.randn(1, self.window_size * 2 + 1, self.head_dim) * self.scaling) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if self.window_size is not None: + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, src_len) + relative_logits = torch.matmul(query_states, key_relative_embeddings.transpose(-2, -1)) + rel_pos_bias = self._relative_position_to_absolute_position(relative_logits) + attn_weights += rel_pos_bias + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + if self.window_size is not None: + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, src_len) + relative_weights = self._absolute_position_to_relative_position(attn_probs) + rel_pos_bias = torch.matmul(relative_weights, value_relative_embeddings) + attn_output += rel_pos_bias + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned aross GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + def _get_relative_embeddings(self, relative_embeddings, length): + pad_length = max(length - (self.window_size + 1), 0) + if pad_length > 0: + relative_embeddings = nn.functional.pad(relative_embeddings, [0, 0, pad_length, pad_length, 0, 0]) + + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + return relative_embeddings[:, slice_start_position:slice_end_position] + + def _relative_position_to_absolute_position(self, x): + batch_heads, length, _ = x.size() + + # Concat columns of pad to shift from relative to absolute indexing. + x = nn.functional.pad(x, [0, 1, 0, 0, 0, 0]) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch_heads, length * 2 * length]) + x_flat = nn.functional.pad(x_flat, [0, length - 1, 0, 0]) + + # Reshape and slice out the padded elements. 
+ x_final = x_flat.view([batch_heads, length + 1, 2 * length - 1]) + x_final = x_final[:, :length, length - 1 :] + return x_final + + def _absolute_position_to_relative_position(self, x): + batch_heads, length, _ = x.size() + + # Pad along column + x = nn.functional.pad(x, [0, length - 1, 0, 0, 0, 0]) + x_flat = x.view([batch_heads, length * (2 * length - 1)]) + + # Add 0's in the beginning that will skew the elements after reshape + x_flat = nn.functional.pad(x_flat, [length, 0, 0, 0]) + x_final = x_flat.view([batch_heads, length, 2 * length])[:, :, 1:] + return x_final + + +class VitsFeedForward(nn.Module): + def __init__(self, config): + super().__init__() + self.conv_1 = nn.Conv1d(config.hidden_size, config.ffn_dim, config.ffn_kernel_size) + self.conv_2 = nn.Conv1d(config.ffn_dim, config.hidden_size, config.ffn_kernel_size) + self.dropout = nn.Dropout(config.activation_dropout) + + if isinstance(config.hidden_act, str): + self.act_fn = ACT2FN[config.hidden_act] + else: + self.act_fn = config.hidden_act + + if config.ffn_kernel_size > 1: + pad_left = (config.ffn_kernel_size - 1) // 2 + pad_right = config.ffn_kernel_size // 2 + self.padding = [pad_left, pad_right, 0, 0, 0, 0] + else: + self.padding = None + + def forward(self, hidden_states, padding_mask): + hidden_states = hidden_states.permute(0, 2, 1) + padding_mask = padding_mask.permute(0, 2, 1) + + hidden_states = hidden_states * padding_mask + if self.padding is not None: + hidden_states = nn.functional.pad(hidden_states, self.padding) + + hidden_states = self.conv_1(hidden_states) + hidden_states = self.act_fn(hidden_states) + hidden_states = self.dropout(hidden_states) + + hidden_states = hidden_states * padding_mask + if self.padding is not None: + hidden_states = nn.functional.pad(hidden_states, self.padding) + + hidden_states = self.conv_2(hidden_states) + hidden_states = hidden_states * padding_mask + + hidden_states = hidden_states.permute(0, 2, 1) + return hidden_states + + +class VitsEncoderLayer(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.attention = VitsAttention(config) + self.dropout = nn.Dropout(config.hidden_dropout) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.feed_forward = VitsFeedForward(config) + self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + padding_mask: torch.FloatTensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states, attn_weights = self.attention( + hidden_states=hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.layer_norm(residual + hidden_states) + + residual = hidden_states + hidden_states = self.feed_forward(hidden_states, padding_mask) + hidden_states = self.dropout(hidden_states) + hidden_states = self.final_layer_norm(residual + hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class VitsEncoder(nn.Module): + def __init__(self, config: VitsConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([VitsEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + self.layerdrop = config.layerdrop + + def forward( + self, + hidden_states: torch.FloatTensor, + padding_mask: torch.FloatTensor, + 
attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) + + hidden_states = hidden_states * padding_mask + + deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() + + for encoder_layer in self.layers: + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = np.random.uniform(0, 1) + + skip_the_layer = self.training and (dropout_probability < self.layerdrop) + if not skip_the_layer or deepspeed_zero3_is_enabled: + # under deepspeed zero3 all gpus must run in sync + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + padding_mask, + attention_mask, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask=attention_mask, + padding_mask=padding_mask, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if skip_the_layer: + layer_outputs = (None, None) + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + hidden_states = hidden_states * padding_mask + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class VitsTextEncoder(nn.Module): + """ + Transformer encoder that uses relative positional representation instead of absolute positional encoding. 
+ """ + + def __init__(self, config: VitsConfig): + super().__init__() + self.config = config + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + self.encoder = VitsEncoder(config) + self.project = nn.Conv1d(config.hidden_size, config.flow_size * 2, kernel_size=1) + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.Tensor, + padding_mask: torch.FloatTensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], VitsTextEncoderOutput]: + hidden_states = self.embed_tokens(input_ids) * math.sqrt(self.config.hidden_size) + + encoder_outputs = self.encoder( + hidden_states=hidden_states, + padding_mask=padding_mask, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] if not return_dict else encoder_outputs.last_hidden_state + + stats = self.project(last_hidden_state.transpose(1, 2)).transpose(1, 2) * padding_mask + prior_means, prior_log_variances = torch.split(stats, self.config.flow_size, dim=2) + + if not return_dict: + outputs = (last_hidden_state, prior_means, prior_log_variances) + encoder_outputs[1:] + return outputs + + return VitsTextEncoderOutput( + last_hidden_state=last_hidden_state, + prior_means=prior_means, + prior_log_variances=prior_log_variances, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class VitsPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = VitsConfig + base_model_prefix = "vits" + main_input_name = "input_ids" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, nn.Conv1d): + nn.init.kaiming_normal_(module.weight) + if module.bias is not None: + k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) + nn.init.uniform_(module.bias, a=-k, b=k) + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +VITS_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`VitsConfig`]): + Model configuration class with all the parameters of the model. 
Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +VITS_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, + 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + speaker_id (`int`, *optional*): + Which speaker embedding to use. Only used for multispeaker models. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The complete VITS model, for text-to-speech synthesis.", + VITS_START_DOCSTRING, +) +class VitsModel(VitsPreTrainedModel): + def __init__(self, config: VitsConfig): + super().__init__(config) + self.config = config + self.text_encoder = VitsTextEncoder(config) + self.flow = VitsResidualCouplingBlock(config) + self.decoder = VitsHifiGan(config) + + if config.use_stochastic_duration_prediction: + self.duration_predictor = VitsStochasticDurationPredictor(config) + else: + self.duration_predictor = VitsDurationPredictor(config) + + if config.num_speakers > 1: + self.embed_speaker = nn.Embedding(config.num_speakers, config.speaker_embedding_size) + + # This is used only for training. + self.posterior_encoder = VitsPosteriorEncoder(config) + + # These parameters control the synthesised speech properties + self.speaking_rate = config.speaking_rate + self.noise_scale = config.noise_scale + self.noise_scale_duration = config.noise_scale_duration + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.text_encoder + + @add_start_docstrings_to_model_forward(VITS_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=VitsModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + speaker_id: Optional[int] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[torch.FloatTensor] = None, + ) -> Union[Tuple[Any], VitsModelOutput]: + r""" + labels (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`, *optional*): + Float values of target spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss + computation. 
+ + Returns: + + Example: + + ```python + >>> from transformers import VitsTokenizer, VitsModel, set_seed + >>> import torch + + >>> tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") + >>> model = VitsModel.from_pretrained("facebook/mms-tts-eng") + + >>> inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") + + >>> set_seed(555) # make deterministic + + >>> with torch.no_grad(): + ... outputs = model(inputs["input_ids"]) + >>> outputs.waveform.shape + torch.Size([1, 45824]) + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if attention_mask is not None: + input_padding_mask = attention_mask.unsqueeze(-1).float() + else: + input_padding_mask = torch.ones_like(input_ids).unsqueeze(-1).float() + + if self.config.num_speakers > 1 and speaker_id is not None: + if not 0 <= speaker_id < self.config.num_speakers: + raise ValueError(f"Set `speaker_id` in the range 0-{self.config.num_speakers - 1}.") + if isinstance(speaker_id, int): + speaker_id = torch.full(size=(1,), fill_value=speaker_id, device=self.device) + speaker_embeddings = self.embed_speaker(speaker_id).unsqueeze(-1) + else: + speaker_embeddings = None + + if labels is not None: + raise NotImplementedError("Training of VITS is not supported yet.") + + text_encoder_output = self.text_encoder( + input_ids=input_ids, + padding_mask=input_padding_mask, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state + hidden_states = hidden_states.transpose(1, 2) + input_padding_mask = input_padding_mask.transpose(1, 2) + prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means + prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances + + if self.config.use_stochastic_duration_prediction: + log_duration = self.duration_predictor( + hidden_states, + input_padding_mask, + speaker_embeddings, + reverse=True, + noise_scale=self.noise_scale_duration, + ) + else: + log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings) + + length_scale = 1.0 / self.speaking_rate + duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale) + predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long() + + # Create a padding mask for the output lengths of shape (batch, 1, max_output_length) + indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device) + output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1) + output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype) + + # Reconstruct an attention tensor of shape (batch, 1, out_length, in_length) + attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1) + batch_size, _, output_length, input_length = attn_mask.shape + cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1) + indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device) + valid_indices = indices.unsqueeze(0) < 
cum_duration + valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length) + padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1] + attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask + + # Expand prior distribution + prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2) + prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2) + + prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale + latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True) + + spectrogram = latents * output_padding_mask + waveform = self.decoder(spectrogram, speaker_embeddings) + waveform = waveform.squeeze(1) + sequence_lengths = predicted_lengths * np.prod(self.config.upsample_rates) + + if not return_dict: + outputs = (waveform, sequence_lengths, spectrogram) + text_encoder_output[3:] + return outputs + + return VitsModelOutput( + waveform=waveform, + sequence_lengths=sequence_lengths, + spectrogram=spectrogram, + hidden_states=text_encoder_output.hidden_states, + attentions=text_encoder_output.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vits/tokenization_vits.py b/venv/lib/python3.10/site-packages/transformers/models/vits/tokenization_vits.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b115c176bcefa5c297049e23be899553bf4066 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vits/tokenization_vits.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2023 The Kakao Enterprise Authors, the MMS-TTS Authors and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization class for VITS.""" + + +import json +import os +import re +from typing import Any, Dict, List, Optional, Tuple, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import is_phonemizer_available, logging + + +if is_phonemizer_available(): + import phonemizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"} + + +def has_non_roman_characters(input_string): + # Find any character outside the ASCII range + non_roman_pattern = re.compile(r"[^\x00-\x7F]") + + # Search the input string for non-Roman characters + match = non_roman_pattern.search(input_string) + has_non_roman = match is not None + return has_non_roman + + +class VitsTokenizer(PreTrainedTokenizer): + """ + Construct a VITS tokenizer. Also supports MMS-TTS. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + language (`str`, *optional*): + Language identifier. + add_blank (`bool`, *optional*, defaults to `True`): + Whether to insert token id 0 in between the other tokens. 
+        normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the input text by removing all casing and punctuation.
+        phonemize (`bool`, *optional*, defaults to `True`):
+            Whether to convert the input text into phonemes.
+        is_uroman (`bool`, *optional*, defaults to `False`):
+            Whether the `uroman` Romanizer needs to be applied to the input text prior to tokenizing.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        pad_token="<pad>",
+        unk_token="<unk>",
+        language=None,
+        add_blank=True,
+        normalize=True,
+        phonemize=True,
+        is_uroman=False,
+        **kwargs,
+    ) -> None:
+        with open(vocab_file, encoding="utf-8") as vocab_handle:
+            self.encoder = json.load(vocab_handle)
+
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.language = language
+        self.add_blank = add_blank
+        self.normalize = normalize
+        self.phonemize = phonemize
+
+        self.is_uroman = is_uroman
+
+        super().__init__(
+            pad_token=pad_token,
+            unk_token=unk_token,
+            language=language,
+            add_blank=add_blank,
+            normalize=normalize,
+            phonemize=phonemize,
+            is_uroman=is_uroman,
+            **kwargs,
+        )
+
+    @property
+    def vocab_size(self):
+        return len(self.encoder)
+
+    def get_vocab(self):
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def normalize_text(self, input_string):
+        """Lowercase the input string, respecting any special token ids that may be part or entirely upper-cased."""
+        all_vocabulary = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())
+        filtered_text = ""
+
+        i = 0
+        while i < len(input_string):
+            found_match = False
+            for word in all_vocabulary:
+                if input_string[i : i + len(word)] == word:
+                    filtered_text += word
+                    i += len(word)
+                    found_match = True
+                    break
+
+            if not found_match:
+                filtered_text += input_string[i].lower()
+                i += 1
+
+        return filtered_text
+
+    def _preprocess_char(self, text):
+        """Special treatment of characters in certain languages"""
+        if self.language == "ron":
+            text = text.replace("ț", "ţ")
+        return text
+
+    def prepare_for_tokenization(
+        self, text: str, is_split_into_words: bool = False, normalize: Optional[bool] = None, **kwargs
+    ) -> Tuple[str, Dict[str, Any]]:
+        """
+        Performs any necessary transformations before tokenization.
+
+        This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
+        `kwargs` at the end of the encoding process to be sure all the arguments have been used.
+
+        Args:
+            text (`str`):
+                The text to prepare.
+            is_split_into_words (`bool`, *optional*, defaults to `False`):
+                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
+                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
+                which it will tokenize.
+            normalize (`bool`, *optional*, defaults to `None`):
+                Whether or not to apply punctuation and casing normalization to the text inputs. Typically, VITS is
+                trained on lower-cased and un-punctuated text. Hence, normalization is used to ensure that the input
+                text consists only of lower-case characters.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Keyword arguments to use for the tokenization.
+
+        Returns:
+            `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
+ """ + normalize = normalize if normalize is not None else self.normalize + + if normalize: + # normalise for casing + text = self.normalize_text(text) + + filtered_text = self._preprocess_char(text) + + if has_non_roman_characters(filtered_text) and self.is_uroman: + logger.warning( + "Text to the tokenizer contains non-Roman characters. Ensure the `uroman` Romanizer is " + "applied to the text prior to passing it to the tokenizer. See " + "`https://github.com/isi-nlp/uroman` for details." + ) + + if self.phonemize: + if not is_phonemizer_available(): + raise ImportError("Please install the `phonemizer` Python package to use this tokenizer.") + + filtered_text = phonemizer.phonemize( + filtered_text, + language="en-us", + backend="espeak", + strip=True, + preserve_punctuation=True, + with_stress=True, + ) + filtered_text = re.sub(r"\s+", " ", filtered_text) + elif normalize: + # strip any chars outside of the vocab (punctuation) + filtered_text = "".join(list(filter(lambda char: char in self.encoder, filtered_text))).strip() + + return filtered_text, kwargs + + def _tokenize(self, text: str) -> List[str]: + """Tokenize a string by inserting the `` token at the boundary between adjacent characters.""" + tokens = list(text) + + if self.add_blank: + interspersed = [self._convert_id_to_token(0)] * (len(tokens) * 2 + 1) + interspersed[1::2] = tokens + tokens = interspersed + + return tokens + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + if self.add_blank and len(tokens) > 1: + tokens = tokens[1::2] + return "".join(tokens) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Union[Tuple[str], None]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + return (vocab_file,)