Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- lm-evaluation-harness/tests/testdata/anli_r2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/ethics_justice-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-jurisprudence-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/logiqa-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/multirc-v1-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/pile_europarl-v0-loglikelihood_rolling +1 -0
- lm-evaluation-harness/tests/testdata/pile_pile-cc-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_uspto-v1-loglikelihood_rolling +1 -0
- lm-evaluation-harness/tests/testdata/race-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/wmt20-en-iu-v0-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-res.json +1 -0
- venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 +3 -0
- venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__init__.py +85 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/configuration_mobilenet_v1.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/feature_extraction_mobilenet_v1.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +126 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py +142 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py +33 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +326 -0
- venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +482 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__init__.py +122 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/configuration_pop2piano.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/convert_pop2piano_weights_to_hf.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/feature_extraction_pop2piano.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/modeling_pop2piano.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/processing_pop2piano.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/tokenization_pop2piano.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/models/pop2piano/configuration_pop2piano.py +128 -0
.gitattributes
CHANGED
@@ -88,3 +88,4 @@ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e9f0cbfd9aefa690c7b3e0e7d80a9f1335bc3f63e96562fa2c4acf142989a86
+size 9372
ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27a9cbb8e5f303316fce2973a8f0a74a43a81dcde7da2abc0c2cf1d5bfc477a5
+size 9387
ckpts/universal/global_step20/zero/13.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bb1f75c0b74d7ccb5b8ea9c854895377cf61b7775fab3ac93f715d60126c90f
+size 9293
ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9a2b5e74a555f20a71f81131c099b52967a6c199141d6da88758acd71380c64
+size 33555612
ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf248dd22e0cb43a601b9d68269acafa7f03dcb62ec18db84c5fb3bf6db8dee9
+size 33555627
ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac084b66a811ddf12d1f1d3adf1ecff5f12ea0a23a360b44ac183a5ddccbd0f2
+size 33555533
ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9014c84d7113b83eeba7da3a7d6d1e495be6bf0cacf953e3f78d9e0b314c94cb
+size 33555627
lm-evaluation-harness/tests/testdata/anli_r2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+d0ea3c3e09d533982c15b4c034439896d6af4bbafb2254d305e20215534a251d
lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_coordinate_structure_constraint_object_extraction": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_coordinate_structure_constraint_object_extraction": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_domain_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_sentential_negation_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_licensor_present": 0}}
lm-evaluation-harness/tests/testdata/ethics_justice-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"ethics_justice": {"acc": 0.49556213017751477, "acc_stderr": 0.009616784279885177, "em": 0.057692307692307696}}, "versions": {"ethics_justice": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-jurisprudence-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+cac440189f1ec778e82f4975d88b74689553ecc5116aaa7f76587a50c1a610e0
lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+97a0f68ba30ea3a6ef1db1a2925c964b09ecc54455a0a930da083e52677815bd
lm-evaluation-harness/tests/testdata/logiqa-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+12495c50454ba5e1ce0753bd18c09aaca516bebd27648d815e37b15229dbf198
lm-evaluation-harness/tests/testdata/multirc-v1-loglikelihood
ADDED
@@ -0,0 +1 @@
+0e793bd6f637a70a04c6f2cda080188fc037961b2f909095fe63f7bdbc4a90c6
lm-evaluation-harness/tests/testdata/pile_europarl-v0-loglikelihood_rolling
ADDED
@@ -0,0 +1 @@
+e67d3dbccd47d308bfc5b0e66b76d0dfc5e386ebfa94e056562c2281c395543f
lm-evaluation-harness/tests/testdata/pile_pile-cc-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_pile-cc": {"bits_per_byte": 0.00011234131907228174, "byte_perplexity": 1.0001123476295946, "word_perplexity": 1.0006738958554477}}, "versions": {"pile_pile-cc": 0}}
lm-evaluation-harness/tests/testdata/pile_uspto-v1-loglikelihood_rolling
ADDED
@@ -0,0 +1 @@
+789b2bdb31564d512b70f801316f49320a26c83ba361226bac0afb255341d477
lm-evaluation-harness/tests/testdata/race-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+bdfdfab7fa1c7af0c1e161785e347b1b8071a15cbf971f6f2a9ae8c8e845199f
lm-evaluation-harness/tests/testdata/wmt20-en-iu-v0-greedy_until
ADDED
@@ -0,0 +1 @@
+f5688199890a48f73f2cc04a2152e35190f0e0ddd40e629fa24ee39d423ea389
lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-greedy_until
ADDED
@@ -0,0 +1 @@
+97bf664a8efa54b5366b8341f77b418106dd0cb26169d5b2d0144e4d3d2bc5c9
lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-iu-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.012204628007572778, "chrf_stderr": 8.944407532175802e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-iu-en": 0}}
lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-km-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.015142474534585969, "chrf_stderr": 0.0001518735048829897, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-km-en": 0}}
venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbd1488bbef82b8d64c2dd51a9b6aef3f6b0bc3a7a80a9821be48d1c07f700e7
+size 53594512
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.28 kB)
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc
ADDED
Binary file (5.07 kB)
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc
ADDED
Binary file (9.13 kB)
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc
ADDED
Binary file (18.7 kB)
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc
ADDED
Binary file (22.2 kB)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__init__.py
ADDED
@@ -0,0 +1,85 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+    "configuration_mobilenet_v1": [
+        "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "MobileNetV1Config",
+        "MobileNetV1OnnxConfig",
+    ],
+}
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["feature_extraction_mobilenet_v1"] = ["MobileNetV1FeatureExtractor"]
+    _import_structure["image_processing_mobilenet_v1"] = ["MobileNetV1ImageProcessor"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_mobilenet_v1"] = [
+        "MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "MobileNetV1ForImageClassification",
+        "MobileNetV1Model",
+        "MobileNetV1PreTrainedModel",
+        "load_tf_weights_in_mobilenet_v1",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_mobilenet_v1 import (
+        MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        MobileNetV1Config,
+        MobileNetV1OnnxConfig,
+    )
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .feature_extraction_mobilenet_v1 import MobileNetV1FeatureExtractor
+        from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_mobilenet_v1 import (
+            MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST,
+            MobileNetV1ForImageClassification,
+            MobileNetV1Model,
+            MobileNetV1PreTrainedModel,
+            load_tf_weights_in_mobilenet_v1,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.37 kB)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/configuration_mobilenet_v1.cpython-310.pyc
ADDED
Binary file (4.88 kB)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/feature_extraction_mobilenet_v1.cpython-310.pyc
ADDED
Binary file (1.06 kB)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1.cpython-310.pyc
ADDED
Binary file (12.9 kB)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
ADDED
@@ -0,0 +1,126 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MobileNetV1 model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class MobileNetV1Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
+    MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the MobileNetV1
+    [google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        num_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        depth_multiplier (`float`, *optional*, defaults to 1.0):
+            Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
+            channels. This is sometimes also called "alpha" or "width multiplier".
+        min_depth (`int`, *optional*, defaults to 8):
+            All layers will have at least this many channels.
+        hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
+            The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
+        tf_padding (`bool`, *optional*, defaults to `True`):
+            Whether to use TensorFlow padding rules on the convolution layers.
+        classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
+            The dropout ratio for attached classifiers.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 0.001):
+            The epsilon used by the layer normalization layers.
+
+    Example:
+
+    ```python
+    >>> from transformers import MobileNetV1Config, MobileNetV1Model
+
+    >>> # Initializing a "mobilenet_v1_1.0_224" style configuration
+    >>> configuration = MobileNetV1Config()
+
+    >>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
+    >>> model = MobileNetV1Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "mobilenet_v1"
+
+    def __init__(
+        self,
+        num_channels=3,
+        image_size=224,
+        depth_multiplier=1.0,
+        min_depth=8,
+        hidden_act="relu6",
+        tf_padding=True,
+        classifier_dropout_prob=0.999,
+        initializer_range=0.02,
+        layer_norm_eps=0.001,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        if depth_multiplier <= 0:
+            raise ValueError("depth_multiplier must be greater than zero.")
+
+        self.num_channels = num_channels
+        self.image_size = image_size
+        self.depth_multiplier = depth_multiplier
+        self.min_depth = min_depth
+        self.hidden_act = hidden_act
+        self.tf_padding = tf_padding
+        self.classifier_dropout_prob = classifier_dropout_prob
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+
+
+class MobileNetV1OnnxConfig(OnnxConfig):
+    torch_onnx_minimum_version = version.parse("1.11")
+
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        return OrderedDict([("pixel_values", {0: "batch"})])
+
+    @property
+    def outputs(self) -> Mapping[str, Mapping[int, str]]:
+        if self.task == "image-classification":
+            return OrderedDict([("logits", {0: "batch"})])
+        else:
+            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
+
+    @property
+    def atol_for_validation(self) -> float:
+        return 1e-4
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""
+
+
+import argparse
+import json
+import re
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+    MobileNetV1Config,
+    MobileNetV1ForImageClassification,
+    MobileNetV1ImageProcessor,
+    load_tf_weights_in_mobilenet_v1,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_mobilenet_v1_config(model_name):
+    config = MobileNetV1Config(layer_norm_eps=0.001)
+
+    if "_quant" in model_name:
+        raise ValueError("Quantized models are not supported.")
+
+    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
+    if matches:
+        config.depth_multiplier = float(matches[1])
+        config.image_size = int(matches[2])
+
+    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
+    # the usual 1000. The first class (index 0) is "background".
+    config.num_labels = 1001
+    filename = "imagenet-1k-id2label.json"
+    repo_id = "huggingface/label-files"
+    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+    id2label = {int(k) + 1: v for k, v in id2label.items()}
+    id2label[0] = "background"
+    config.id2label = id2label
+    config.label2id = {v: k for k, v in id2label.items()}
+
+    return config
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    im = Image.open(requests.get(url, stream=True).raw)
+    return im
+
+
+@torch.no_grad()
+def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
+    """
+    Copy/paste/tweak model's weights to our MobileNetV1 structure.
+    """
+    config = get_mobilenet_v1_config(model_name)
+
+    # Load 🤗 model
+    model = MobileNetV1ForImageClassification(config).eval()
+
+    # Load weights from TensorFlow checkpoint
+    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
+
+    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
+    image_processor = MobileNetV1ImageProcessor(
+        crop_size={"width": config.image_size, "height": config.image_size},
+        size={"shortest_edge": config.image_size + 32},
+    )
+    encoding = image_processor(images=prepare_img(), return_tensors="pt")
+    outputs = model(**encoding)
+    logits = outputs.logits
+
+    assert logits.shape == (1, 1001)
+
+    if model_name == "mobilenet_v1_1.0_224":
+        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
+    elif model_name == "mobilenet_v1_0.75_192":
+        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
+    else:
+        expected_logits = None
+
+    if expected_logits is not None:
+        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
+
+    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
+    model.save_pretrained(pytorch_dump_folder_path)
+    print(f"Saving image processor to {pytorch_dump_folder_path}")
+    image_processor.save_pretrained(pytorch_dump_folder_path)
+
+    if push_to_hub:
+        print("Pushing to the hub...")
+        repo_id = "google/" + model_name
+        image_processor.push_to_hub(repo_id)
+        model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--model_name",
+        default="mobilenet_v1_1.0_224",
+        type=str,
+        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
+    )
+    parser.add_argument(
+        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
+    )
+    parser.add_argument(
+        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+    )
+
+    args = parser.parse_args()
+    convert_movilevit_checkpoint(
+        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
+    )
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py
ADDED
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for MobileNetV1."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
+    def __init__(self, *args, **kwargs) -> None:
+        warnings.warn(
+            "The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+            " Please use MobileNetV1ImageProcessor instead.",
+            FutureWarning,
+        )
+        super().__init__(*args, **kwargs)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
ADDED
@@ -0,0 +1,326 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for MobileNetV1."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+    get_resize_output_image_size,
+    resize,
+    to_channel_dimension_format,
+)
+from ...image_utils import (
+    IMAGENET_STANDARD_MEAN,
+    IMAGENET_STANDARD_STD,
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MobileNetV1ImageProcessor(BaseImageProcessor):
+    r"""
+    Constructs a MobileNetV1 image processor.
+
+    Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+            `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
+            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+            method.
+        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+            `preprocess` method.
+        do_center_crop (`bool`, *optional*, defaults to `True`):
+            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
+            is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
+            `preprocess` method.
+        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+            Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
+            Can be overridden by the `crop_size` parameter in the `preprocess` method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+            parameter in the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+            `preprocess` method.
+        do_normalize:
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_resize: bool = True,
+        size: Optional[Dict[str, int]] = None,
+        resample: PILImageResampling = PILImageResampling.BILINEAR,
+        do_center_crop: bool = True,
+        crop_size: Dict[str, int] = None,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        size = size if size is not None else {"shortest_edge": 256}
+        size = get_size_dict(size, default_to_square=False)
+        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+        crop_size = get_size_dict(crop_size)
+        self.do_resize = do_resize
+        self.size = size
+        self.resample = resample
+        self.do_center_crop = do_center_crop
+        self.crop_size = crop_size
+        self.do_rescale = do_rescale
+        self.rescale_factor = rescale_factor
+        self.do_normalize = do_normalize
+        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+        self._valid_processor_keys = [
+            "images",
+            "do_resize",
+            "size",
+            "resample",
+            "do_center_crop",
+            "crop_size",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+    # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
+    def resize(
+        self,
+        image: np.ndarray,
+        size: Dict[str, int],
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+        resized to keep the input aspect ratio.
+
+        Args:
+            image (`np.ndarray`):
+                Image to resize.
+            size (`Dict[str, int]`):
+                Size of the output image.
+            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resiizing the image.
+            data_format (`str` or `ChannelDimension`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format of the input image. If not provided, it will be inferred.
+        """
+        default_to_square = True
+        if "shortest_edge" in size:
+            size = size["shortest_edge"]
+            default_to_square = False
+        elif "height" in size and "width" in size:
+            size = (size["height"], size["width"])
+        else:
+            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
+
+        output_size = get_resize_output_image_size(
+            image,
+            size=size,
+            default_to_square=default_to_square,
+            input_data_format=input_data_format,
+        )
+        return resize(
+            image,
+            size=output_size,
+            resample=resample,
+            data_format=data_format,
+            input_data_format=input_data_format,
+            **kwargs,
+        )
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: Optional[bool] = None,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = None,
+        do_center_crop: bool = None,
+        crop_size: Dict[str, int] = None,
+        do_rescale: Optional[bool] = None,
+        rescale_factor: Optional[float] = None,
+        do_normalize: Optional[bool] = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ):
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+                the longest edge resized to keep the input aspect ratio.
+            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+                an effect if `do_resize` is set to `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values between [0 - 1].
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to use if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to use if `do_normalize` is set to `True`.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        size = size if size is not None else self.size
+        size = get_size_dict(size, default_to_square=False)
+        resample = resample if resample is not None else self.resample
+        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+        crop_size = crop_size if crop_size is not None else self.crop_size
+        crop_size = get_size_dict(crop_size)
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+
+        images = make_list_of_images(images)
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_center_crop=do_center_crop,
+            crop_size=crop_size,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+        )
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+
+        if is_scaled_image(images[0]) and do_rescale:
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+            input_data_format = infer_channel_dimension_format(images[0])
+
+        if do_resize:
+            images = [
+                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        if do_center_crop:
+            images = [
+                self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+            ]
+
+        if do_rescale:
+            images = [
+                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        if do_normalize:
+            images = [
+                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        images = [
+            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+        ]
+
+        data = {"pixel_values": images}
+        return BatchFeature(data=data, tensor_type=return_tensors)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
ADDED
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch MobileNetV1 model."""
|
16 |
+
|
17 |
+
|
18 |
+
from typing import Optional, Union
|
19 |
+
|
20 |
+
import torch
|
21 |
+
from torch import nn
|
22 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
23 |
+
|
24 |
+
from ...activations import ACT2FN
|
25 |
+
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
|
26 |
+
from ...modeling_utils import PreTrainedModel
|
27 |
+
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
|
28 |
+
from .configuration_mobilenet_v1 import MobileNetV1Config
|
29 |
+
|
30 |
+
|
31 |
+
logger = logging.get_logger(__name__)
|
32 |
+
|
33 |
+
|
34 |
+
# General docstring
|
35 |
+
_CONFIG_FOR_DOC = "MobileNetV1Config"
|
36 |
+
|
37 |
+
# Base docstring
|
38 |
+
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
|
39 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
|
40 |
+
|
41 |
+
# Image classification docstring
|
42 |
+
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
|
43 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
44 |
+
|
45 |
+
|
46 |
+
from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
47 |
+
|
48 |
+
|
49 |
+
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
|
50 |
+
"""
|
51 |
+
A map of modules from TF to PyTorch.
|
52 |
+
"""
|
53 |
+
|
54 |
+
tf_to_pt_map = {}
|
55 |
+
|
56 |
+
if isinstance(model, MobileNetV1ForImageClassification):
|
57 |
+
backbone = model.mobilenet_v1
|
58 |
+
else:
|
59 |
+
backbone = model
|
60 |
+
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
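As a quick orientation, here is a minimal sketch of driving this converter. It is illustrative only: TensorFlow must be installed, the checkpoint path is a hypothetical placeholder, and the 1001-label head is an assumption matching the usual slim ImageNet layout (the companion conversion script in this directory is presumably the intended entry point).

# Hedged usage sketch for load_tf_weights_in_mobilenet_v1.
# Assumptions: TensorFlow is available, "/path/to/mobilenet_v1_1.0_224.ckpt" is a
# hypothetical slim checkpoint prefix, and the checkpoint has 1001 classes
# (1000 ImageNet classes + background).
from transformers import MobileNetV1Config, MobileNetV1ForImageClassification
from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import load_tf_weights_in_mobilenet_v1

config = MobileNetV1Config(tf_padding=True, num_labels=1001)
model = MobileNetV1ForImageClassification(config).eval()

# Populates the PyTorch parameters in place using the name map built above.
load_tf_weights_in_mobilenet_v1(model, config, "/path/to/mobilenet_v1_1.0_224.ckpt")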
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at:
    https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
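To make the padding arithmetic concrete: for a 224x224 input, a 3x3 kernel and stride 2, 224 % 2 == 0, so pad_along = max(3 - 2, 0) = 1, which is split asymmetrically as one pixel on the right and bottom. That reproduces TensorFlow's "SAME" output size of ceil(224 / 2) = 112. A small self-contained check (plain PyTorch, nothing model-specific assumed):

# Evaluate the "SAME" padding formula above by hand for the stem convolution case.
import torch
from torch import nn

conv = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=0)
features = torch.randn(1, 3, 224, 224)

# pad_along_height = pad_along_width = 1  ->  (left, right, top, bottom) = (0, 1, 0, 1)
padded = nn.functional.pad(features, (0, 1, 0, 1), "constant", 0.0)
print(conv(padded).shape)  # torch.Size([1, 32, 112, 112]), i.e. ceil(224 / 2)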
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
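For readers skimming the diff, a minimal sketch of how one depthwise/pointwise pair of these layers composes, mirroring how the backbone stacks them below. It assumes the classes above are importable from transformers.models.mobilenet_v1.modeling_mobilenet_v1 and uses illustrative channel sizes.

# One depthwise (groups == channels) + pointwise (1x1) pair applied to a dummy feature map.
import torch
from transformers import MobileNetV1Config
from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MobileNetV1ConvLayer

config = MobileNetV1Config()
depthwise = MobileNetV1ConvLayer(config, in_channels=32, out_channels=32, kernel_size=3, stride=1, groups=32)
pointwise = MobileNetV1ConvLayer(config, in_channels=32, out_channels=64, kernel_size=1)

features = torch.randn(1, 32, 112, 112)
print(pointwise(depthwise(features)).shape)  # expected: torch.Size([1, 64, 112, 112])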
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
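A minimal usage sketch for the bare backbone, using a randomly initialized model and random pixel values so nothing has to be downloaded. The printed shapes follow from the stride schedule and the channel doubling in __init__, assuming the default depth_multiplier of 1.0.

# Run the backbone on an image-sized random tensor and inspect the output shapes.
import torch
from transformers import MobileNetV1Config, MobileNetV1Model

model = MobileNetV1Model(MobileNetV1Config()).eval()
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 1024, 7, 7]) with the defaults
print(outputs.pooler_output.shape)      # torch.Size([1, 1024])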
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
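And a hedged end-to-end sketch for the classification head. The Hub checkpoint name is an assumption (any MobileNetV1 image-classification checkpoint should work), and the blank PIL image stands in for a real photo.

# Classify an image with a pretrained MobileNetV1 head and print the predicted label.
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

image = Image.new("RGB", (224, 224))  # placeholder for a real image

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").eval()

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])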
venv/lib/python3.10/site-packages/transformers/models/pop2piano/__init__.py
ADDED
@@ -0,0 +1,122 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_essentia_available,
    is_librosa_available,
    is_pretty_midi_available,
    is_scipy_available,
    is_torch_available,
)


_import_structure = {
    "configuration_pop2piano": ["POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pop2PianoConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pop2piano"] = [
        "POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pop2PianoForConditionalGeneration",
        "Pop2PianoPreTrainedModel",
    ]

try:
    if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_pop2piano"] = ["Pop2PianoFeatureExtractor"]

try:
    if not (is_pretty_midi_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_pop2piano"] = ["Pop2PianoTokenizer"]

try:
    if not (
        is_pretty_midi_available()
        and is_torch_available()
        and is_librosa_available()
        and is_essentia_available()
        and is_scipy_available()
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["processing_pop2piano"] = ["Pop2PianoProcessor"]


if TYPE_CHECKING:
    from .configuration_pop2piano import POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP, Pop2PianoConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pop2piano import (
            POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pop2PianoForConditionalGeneration,
            Pop2PianoPreTrainedModel,
        )

    try:
        if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_pop2piano import Pop2PianoFeatureExtractor

    try:
        if not (is_pretty_midi_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_pop2piano import Pop2PianoTokenizer

    try:
        if not (
            is_pretty_midi_available()
            and is_torch_available()
            and is_librosa_available()
            and is_essentia_available()
            and is_scipy_available()
        ):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .processing_pop2piano import Pop2PianoProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
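A short sketch of what this lazy-module registration means in practice: the configuration import never touches torch, librosa, essentia, scipy or pretty_midi, while names guarded by the dependency checks are only registered when those packages are installed. The import paths below are the public ones implied by the structure above.

# The config import always works; optional-dependency names may simply be absent.
from transformers.models.pop2piano import Pop2PianoConfig

try:
    from transformers.models.pop2piano import Pop2PianoFeatureExtractor
except ImportError:
    # If librosa / essentia / scipy / torch are missing, the name was never added
    # to _import_structure, so the lazy module cannot resolve it.
    Pop2PianoFeatureExtractor = None

print(Pop2PianoConfig().model_type)  # "pop2piano"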
venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.77 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/configuration_pop2piano.cpython-310.pyc
ADDED
Binary file (5.14 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/convert_pop2piano_weights_to_hf.cpython-310.pyc
ADDED
Binary file (4.22 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/feature_extraction_pop2piano.cpython-310.pyc
ADDED
Binary file (14.5 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/modeling_pop2piano.cpython-310.pyc
ADDED
Binary file (40.7 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/processing_pop2piano.cpython-310.pyc
ADDED
Binary file (4.58 kB)

venv/lib/python3.10/site-packages/transformers/models/pop2piano/__pycache__/tokenization_pop2piano.cpython-310.pyc
ADDED
Binary file (24.7 kB)
venv/lib/python3.10/site-packages/transformers/models/pop2piano/configuration_pop2piano.py
ADDED
@@ -0,0 +1,128 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Pop2Piano model configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class Pop2PianoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used
    to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 2400):
            Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens
            that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`].
        composer_vocab_size (`int`, *optional*, defaults to 21):
            Denotes the number of composers.
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `Pop2PianoBlock`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        dense_act_fn (`string`, *optional*, defaults to `"relu"`):
            Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`.
    """

    model_type = "pop2piano"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=2400,
        composer_vocab_size=21,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",  # noqa
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        dense_act_fn="relu",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.composer_vocab_size = composer_vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.dense_act_fn = dense_act_fn
        self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated"
        self.hidden_size = self.d_model
        self.num_attention_heads = num_heads
        self.num_hidden_layers = num_layers

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
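A minimal sketch instantiating the configuration just defined and checking a few of the attributes derived in __init__:

# Default Pop2PianoConfig and its derived fields.
from transformers import Pop2PianoConfig

config = Pop2PianoConfig()
print(config.vocab_size)          # 2400
print(config.is_gated_act)        # True, since feed_forward_proj == "gated-gelu"
print(config.num_decoder_layers)  # 6, falls back to num_layers when not set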