Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/__init__.py +44 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/data_collator.py +1568 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/glue.py +161 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py +530 -0
- llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/squad.py +229 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h +61 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/vision.cpp +16 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu +186 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp +66 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common.h +10 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h +9 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h +79 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu +588 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h +71 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu +825 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h +157 -0
- llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp +128 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__init__.py +60 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/configuration_git.py +240 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py +428 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/modeling_git.py +1543 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/git/processing_git.py +113 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__init__.py +15 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/auto.py +161 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/base.py +213 -0
- llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py +98 -0
llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc
ADDED
Binary file (15.9 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc
ADDED
Binary file (23.3 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc
ADDED
Binary file (29.1 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc
ADDED
Binary file (23.7 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc
ADDED
Binary file (28.1 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc
ADDED
Binary file (3.58 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc
ADDED
Binary file (25.9 kB)

llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc
ADDED
Binary file (54.3 kB)
llmeval-env/lib/python3.10/site-packages/transformers/data/__init__.py
ADDED
@@ -0,0 +1,44 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
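Aside: the file above is the `transformers.data` entry point, re-exporting the data collators and dataset processors. A minimal usage sketch, not part of the diff (checkpoint name illustrative):

    from transformers import AutoTokenizer
    from transformers.data import DataCollatorForLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    # Standard BERT-style masked language modeling objective.
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)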
llmeval-env/lib/python3.10/site-packages/transformers/data/data_collator.py
ADDED
@@ -0,0 +1,1568 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from random import randint
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union

import numpy as np

from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy


InputDataClass = NewType("InputDataClass", Any)

"""
A DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary
of PyTorch/TensorFlow tensors or NumPy arrays.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])


class DataCollatorMixin:
    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        if return_tensors == "tf":
            return self.tf_call(features)
        elif return_tensors == "pt":
            return self.torch_call(features)
        elif return_tensors == "np":
            return self.numpy_call(features)
        else:
            raise ValueError(f"Framework '{return_tensors}' not recognized!")


def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
    """
    Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
    """

    # To avoid errors when using Feature extractors
    if not hasattr(tokenizer, "deprecation_warnings"):
        return tokenizer.pad(*pad_args, **pad_kwargs)

    # Save the state of the warning, then disable it
    warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
    tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True

    try:
        padded = tokenizer.pad(*pad_args, **pad_kwargs)
    finally:
        # Restore the state of the warning.
        tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state

    return padded
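Aside: `DataCollatorMixin.__call__` is pure dispatch, so a custom collator only needs to implement the backend methods it actually uses. A rough sketch, not part of the diff (class and field names invented):

    from dataclasses import dataclass
    from typing import Any, Dict, List

    from transformers.data.data_collator import DataCollatorMixin

    @dataclass
    class StackingCollator(DataCollatorMixin):
        return_tensors: str = "pt"

        def torch_call(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
            import torch
            # Assumes pre-tokenized, equal-length examples; real collators pad first.
            return {"input_ids": torch.tensor([f["input_ids"] for f in features])}

    batch = StackingCollator()([{"input_ids": [101, 7592, 102]}, {"input_ids": [101, 2088, 102]}])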
def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
    """
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.
    """

    # In this function we'll make the assumption that all `features` in the batch
    # have the same attributes.
    # So we will look at the first element as a proxy for what attributes exist
    # on the whole batch.

    if return_tensors == "pt":
        return torch_default_data_collator(features)
    elif return_tensors == "tf":
        return tf_default_data_collator(features)
    elif return_tensors == "np":
        return numpy_default_data_collator(features)


@dataclass
class DefaultDataCollator(DataCollatorMixin):
    """
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.

    This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
    helpful if you need to set a return_tensors value at initialization.

    Args:
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
        if return_tensors is None:
            return_tensors = self.return_tensors
        return default_data_collator(features, return_tensors)


def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import torch

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels.
    # Ensure that tensor is created with the correct type
    # (it should be automatically the case, but let's make sure of it.)
    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
        dtype = torch.long if isinstance(label, int) else torch.float
        batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], torch.Tensor):
            batch["labels"] = torch.stack([f["label_ids"] for f in features])
        else:
            dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
            batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)

    # Handling of all other possible keys.
    # Again, we will use the first element to figure out which key/values are not None for this model.
    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, torch.Tensor):
                batch[k] = torch.stack([f[k] for f in features])
            elif isinstance(v, np.ndarray):
                batch[k] = torch.tensor(np.stack([f[k] for f in features]))
            else:
                batch[k] = torch.tensor([f[k] for f in features])

    return batch


def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import tensorflow as tf

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels.
    # Ensure that tensor is created with the correct type
    # (it should be automatically the case, but let's make sure of it.)
    if "label" in first and first["label"] is not None:
        label_col_name = "label"
    elif "label_ids" in first and first["label_ids"] is not None:
        label_col_name = "label_ids"
    elif "labels" in first and first["labels"] is not None:
        label_col_name = "labels"
    else:
        label_col_name = None
    if label_col_name is not None:
        if isinstance(first[label_col_name], tf.Tensor):
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
        elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
            dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
        elif isinstance(first[label_col_name], (tuple, list)):
            dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
        else:
            dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
        batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
    # Handling of all other possible keys.
    # Again, we will use the first element to figure out which key/values are not None for this model.
    for k, v in first.items():
        if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
            if isinstance(v, (tf.Tensor, np.ndarray)):
                batch[k] = tf.stack([f[k] for f in features])
            else:
                batch[k] = tf.convert_to_tensor([f[k] for f in features])

    return batch


def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels.
    # Ensure that tensor is created with the correct type
    # (it should be automatically the case, but let's make sure of it.)
    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
        dtype = np.int64 if isinstance(label, int) else np.float32
        batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], np.ndarray):
            batch["labels"] = np.stack([f["label_ids"] for f in features])
        else:
            dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
            batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)

    # Handling of all other possible keys.
    # Again, we will use the first element to figure out which key/values are not None for this model.
    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, np.ndarray):
                batch[k] = np.stack([f[k] for f in features])
            else:
                batch[k] = np.array([f[k] for f in features])

    return batch
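Aside: the behavior documented above is easy to see on plain dicts; `default_data_collator` stacks same-shaped features and renames `label` to `labels`, with no padding. A quick sketch, not part of the diff (values invented):

    features = [
        {"input_ids": [101, 2023, 102], "label": 0},
        {"input_ids": [101, 2008, 102], "label": 1},
    ]
    batch = default_data_collator(features, return_tensors="np")
    print(batch["input_ids"].shape)  # (2, 3): stacked as-is, no padding
    print(batch["labels"])           # [0 1]: "label" renamed to "labels"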
@dataclass
class DataCollatorWithPadding:
    """
    Data collator that will dynamically pad the inputs received.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )
        if "label" in batch:
            batch["labels"] = batch["label"]
            del batch["label"]
        if "label_ids" in batch:
            batch["labels"] = batch["label_ids"]
            del batch["label_ids"]
        return batch
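Aside: typical wiring for `DataCollatorWithPadding`, sketched here and not part of the diff (checkpoint name and texts illustrative):

    from transformers import AutoTokenizer
    from transformers.data import DataCollatorWithPadding

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer=tokenizer, pad_to_multiple_of=8)

    # Tokenize without padding; the collator pads each batch to its own longest length.
    features = [tokenizer(t) for t in ["short text", "a somewhat longer piece of text"]]
    batch = collator(features)  # batch["input_ids"].shape[1] comes out a multiple of 8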
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None

        no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            no_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        if labels is None:
            return batch

        sequence_length = batch["input_ids"].shape[1]
        padding_side = self.tokenizer.padding_side

        def to_list(tensor_or_iterable):
            if isinstance(tensor_or_iterable, torch.Tensor):
                return tensor_or_iterable.tolist()
            return list(tensor_or_iterable)

        if padding_side == "right":
            batch[label_name] = [
                to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
            ]

        batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
        return batch

    def tf_call(self, features):
        import tensorflow as tf

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="tf" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
        return batch

    def numpy_call(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="np" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = np.array(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
        return batch
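Aside: the notable detail above is that input padding is delegated to the tokenizer while labels are extended by hand with `label_pad_token_id`. The right-padding branch in isolation (plain Python, values invented):

    label_pad_token_id = -100
    sequence_length = 6  # length the tokenizer padded input_ids to
    labels = [[1, 2], [3, 4, 5, 6]]
    padded = [label + [label_pad_token_id] * (sequence_length - len(label)) for label in labels]
    # [[1, 2, -100, -100, -100, -100], [3, 4, 5, 6, -100, -100]]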
def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    import torch

    # Tensorize if necessary.
    if isinstance(examples[0], (list, tuple, np.ndarray)):
        examples = [torch.tensor(e, dtype=torch.long) for e in examples]

    length_of_first = examples[0].size(0)

    # Check if padding is necessary.

    are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
    if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
        return torch.stack(examples, dim=0)

    # If yes, check if we have a `pad_token`.
    if tokenizer._pad_token is None:
        raise ValueError(
            "You are attempting to pad samples but the tokenizer you are using"
            f" ({tokenizer.__class__.__name__}) does not have a pad token."
        )

    # Creating the full tensor and filling it with our data.
    max_length = max(x.size(0) for x in examples)
    if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
    for i, example in enumerate(examples):
        if tokenizer.padding_side == "right":
            result[i, : example.shape[0]] = example
        else:
            result[i, -example.shape[0] :] = example
    return result


def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    import tensorflow as tf

    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    # Tensorize if necessary.
    if isinstance(examples[0], (list, tuple)):
        examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]

    # Check if padding is necessary.
    length_of_first = len(examples[0])
    are_tensors_same_length = all(len(x) == length_of_first for x in examples)
    if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
        return tf.stack(examples, axis=0)

    # If yes, check if we have a `pad_token`.
    if tokenizer._pad_token is None:
        raise ValueError(
            "You are attempting to pad samples but the tokenizer you are using"
            f" ({tokenizer.__class__.__name__}) does not have a pad token."
        )

    # Creating the full tensor and filling it with our data.
    max_length = max(len(x) for x in examples)
    if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    # result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
    result = []
    rank = tf.rank(examples[0])
    paddings = np.zeros((rank, 2), dtype=np.int32)
    for example in examples:
        if tokenizer.padding_side == "right":
            paddings[0, 1] = max_length - len(example)
        else:
            paddings[0, 0] = max_length - len(example)
        result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
    return tf.stack(result, axis=0)


def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    # Tensorize if necessary.
    if isinstance(examples[0], (list, tuple)):
        examples = [np.array(e, dtype=np.int64) for e in examples]

    # Check if padding is necessary.
    length_of_first = len(examples[0])
    are_tensors_same_length = all(len(x) == length_of_first for x in examples)
    if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
        return np.stack(examples, axis=0)

    # If yes, check if we have a `pad_token`.
    if tokenizer._pad_token is None:
        raise ValueError(
            "You are attempting to pad samples but the tokenizer you are using"
            f" ({tokenizer.__class__.__name__}) does not have a pad token."
        )

    # Creating the full tensor and filling it with our data.
    max_length = max(len(x) for x in examples)
    if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
    for i, example in enumerate(examples):
        if tokenizer.padding_side == "right":
            result[i, : example.shape[0]] = example
        else:
            result[i, -example.shape[0] :] = example
    return result
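Aside: all three `_*_collate_batch` helpers round the batch length up to the next multiple with the same integer arithmetic; checked standalone (illustrative values, not part of the diff):

    def round_up(max_length: int, multiple: int) -> int:
        # Same expression as in the helpers above.
        if max_length % multiple != 0:
            max_length = ((max_length // multiple) + 1) * multiple
        return max_length

    assert round_up(13, 8) == 16  # padded up to the next multiple of 8
    assert round_up(16, 8) == 16  # already a multiple, unchanged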
def tolist(x):
    if isinstance(x, list):
        return x
    elif hasattr(x, "numpy"):  # Checks for TF tensors without needing the import
        x = x.numpy()
    return x.tolist()


@dataclass
class DataCollatorForSeq2Seq:
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
            prepare the *decoder_input_ids*

            This is useful when using *label_smoothing* to avoid calculating loss twice.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    model: Optional[Any] = None
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
        # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
        # same length to return tensors.
        if labels is not None:
            max_label_length = max(len(l) for l in labels)
            if self.pad_to_multiple_of is not None:
                max_label_length = (
                    (max_label_length + self.pad_to_multiple_of - 1)
                    // self.pad_to_multiple_of
                    * self.pad_to_multiple_of
                )

            padding_side = self.tokenizer.padding_side
            for feature in features:
                remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
                if isinstance(feature["labels"], list):
                    feature["labels"] = (
                        feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
                    )
                elif padding_side == "right":
                    feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
                else:
                    feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)

        features = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=return_tensors,
        )

        # prepare decoder_input_ids
        if (
            labels is not None
            and self.model is not None
            and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
        ):
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
            features["decoder_input_ids"] = decoder_input_ids

        return features
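Aside: a typical seq2seq setup for this collator, sketched here and not part of the diff (checkpoint name illustrative):

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers.data import DataCollatorForSeq2Seq

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    # Passing the model lets the collator derive decoder_input_ids from the padded labels.
    collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, label_pad_token_id=-100)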
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
    """
    Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
    are not all of the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        mlm (`bool`, *optional*, defaults to `True`):
            Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
            with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
            tokens and the value to predict for the masked token.
        mlm_probability (`float`, *optional*, defaults to 0.15):
            The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".

    <Tip>

    For best performance, this data collator should be used with a dataset having items that are dictionaries or
    BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
    [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.

    </Tip>"""

    tokenizer: PreTrainedTokenizerBase
    mlm: bool = True
    mlm_probability: float = 0.15
    pad_to_multiple_of: Optional[int] = None
    tf_experimental_compile: bool = False
    return_tensors: str = "pt"

    def __post_init__(self):
        if self.mlm and self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. "
                "You should pass `mlm=False` to train on causal language modeling instead."
            )
        if self.tf_experimental_compile:
            import tensorflow as tf

            self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)

    @staticmethod
    def tf_bernoulli(shape, probability):
        import tensorflow as tf

        prob_matrix = tf.fill(shape, probability)
        return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)

    def tf_mask_tokens(
        self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
    ) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import tensorflow as tf

        mask_token_id = tf.cast(mask_token_id, inputs.dtype)

        input_shape = tf.shape(inputs)
        # 1 for a special token, 0 for a normal token in the special tokens mask
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
        # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
        labels = tf.where(masked_indices, inputs, -100)

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices

        inputs = tf.where(indices_replaced, mask_token_id, inputs)

        # 10% of the time, we replace masked input tokens with random word
        indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
        random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)

        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        import tensorflow as tf

        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            if special_tokens_mask is None:
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                    for val in batch["input_ids"].numpy().tolist()
                ]
                # Cannot directly create as bool
                special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
            else:
                special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
            batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
                tf.cast(batch["input_ids"], tf.int64),
                special_tokens_mask=special_tokens_mask,
                mask_token_id=self.tokenizer.mask_token_id,
                vocab_size=len(self.tokenizer),
            )
        else:
            labels = batch["input_ids"]
            if self.tokenizer.pad_token_id is not None:
                # Replace self.tokenizer.pad_token_id with -100
                labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
            else:
                labels = tf.identity(labels)  # Makes a copy, just in case
            batch["labels"] = labels
        return batch

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = batch["input_ids"].clone()
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import torch

        labels = inputs.clone()
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
        else:
            special_tokens_mask = special_tokens_mask.bool()

        probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = np.copy(batch["input_ids"])
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        labels = np.copy(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = np.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
        else:
            special_tokens_mask = special_tokens_mask.astype(bool)

        probability_matrix[special_tokens_mask] = 0
        # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
        masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
        inputs[indices_replaced] = self.tokenizer.mask_token_id

        # 10% of the time, we replace masked input tokens with random word
        # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
        random_words = np.random.randint(
            low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
        )
        inputs[indices_random] = random_words

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels
875 |
+
|
876 |
+
|
877 |
+
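
# A minimal sketch of how the collator above behaves, assuming a BERT-style checkpoint
# is available; the function name and checkpoint here are illustrative, not part of
# this module's API. Roughly `mlm_probability` of the non-special positions receive a
# label != -100; of those, ~80% become [MASK], ~10% a random id, ~10% stay unchanged.
def _example_mlm_collation():  # illustrative only, never called by the library
    from transformers import AutoTokenizer, DataCollatorForLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15, return_tensors="np")
    batch = collator([tokenizer("hello world, this is a test")])
    # labels are -100 everywhere except the ~15% of positions chosen for prediction
    print(batch["input_ids"], batch["labels"])
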
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
    """
    Data collator used for language modeling that masks entire words.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for masked language modeling

    <Tip>

    This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
    that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
    produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].

    </Tip>"""

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        import tensorflow as tf

        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
        """
        Get 0/1 labels for masked tokens with whole word mask proxy
        """
        if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
            warnings.warn(
                "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
                "Please refer to the documentation for more information."
            )

        cand_indexes = []
        for i, token in enumerate(input_tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue

            if len(cand_indexes) >= 1 and token.startswith("##"):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])

        random.shuffle(cand_indexes)
        num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                masked_lms.append(index)

        if len(covered_indexes) != len(masked_lms):
            raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
        mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
        return mask_labels

    def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                " --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)

        probability_matrix = mask_labels

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)

        masked_indices = probability_matrix.bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
        """
        import tensorflow as tf

        input_shape = tf.shape(inputs)
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                " --mlm flag if you want to use this tokenizer."
            )
        labels = tf.identity(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)

        masked_indices = tf.cast(mask_labels, tf.bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
        ]
        masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
        if self.tokenizer._pad_token is not None:
            padding_mask = inputs == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask

        # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
        labels = tf.where(masked_indices, inputs, -100)

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices

        inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)

        # 10% of the time, we replace masked input tokens with a random word
        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
        random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                " --mlm flag if you want to use this tokenizer."
            )
        labels = np.copy(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)

        masked_indices = mask_labels.astype(bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0

        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
        random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

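
# A minimal sketch of the "##" grouping that drives whole-word masking above; the token
# list is hypothetical and the function is illustrative, not part of this module's API.
# Subword pieces prefixed with "##" join the previous candidate, so a word is either
# masked in full or not at all.
def _example_wwm_candidates():  # illustrative only, never called by the library
    tokens = ["[CLS]", "un", "##believ", "##able", "story", "[SEP]"]
    cand_indexes = []
    for i, token in enumerate(tokens):
        if token in ("[CLS]", "[SEP]"):
            continue
        if cand_indexes and token.startswith("##"):
            cand_indexes[-1].append(i)
        else:
            cand_indexes.append([i])
    print(cand_indexes)  # [[1, 2, 3], [4]] -> "unbelievable" masks as one unit
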
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
    """
    Data collator used for sentence order prediction task.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for both masked language modeling and sentence order prediction
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
            "DataCollatorForLanguageModeling instead.",
            FutureWarning,
        )

    def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
        import torch
        from torch.nn.utils.rnn import pad_sequence

        input_ids = [example["input_ids"] for example in examples]
        input_ids = _torch_collate_batch(input_ids, self.tokenizer)
        input_ids, labels, attention_mask = self.mask_tokens(input_ids)

        token_type_ids = [example["token_type_ids"] for example in examples]
        # The size of segment_ids varies because of randomness; pad to the end as in the original implementation
        token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)

        sop_label_list = [example["sentence_order_label"] for example in examples]
        sentence_order_label = torch.stack(sop_label_list)

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "sentence_order_label": sentence_order_label,
        }

    def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
        """
        Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
        original. N-gram not applied yet.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
                " --mlm flag if you want to use this tokenizer."
            )

        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        # The sampling marks masked positions with `1`, but in the ALBERT model an attention-mask value of `0` means masked, so invert the values
        attention_mask = (~masked_indices).float()
        if self.tokenizer._pad_token is not None:
            attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
            attention_mask.masked_fill_(attention_padding_mask, value=1.0)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens; -100 is the default ignore index for cross-entropy

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels, attention_mask

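
# A minimal sketch of the ALBERT attention-mask convention used above; the tensors are
# hypothetical and the function is illustrative, not part of this module's API. Masked
# positions get attention_mask 0.0, and padding is forced back to 1.0 afterwards.
def _example_sop_attention_mask():  # illustrative only, never called by the library
    import torch

    masked_indices = torch.tensor([[False, True, False]])
    attention_mask = (~masked_indices).float()
    print(attention_mask)  # tensor([[1., 0., 1.]])
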
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
    """
    Data collator used for permutation language modeling.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for permutation language modeling with procedures specific to XLNet
    """

    tokenizer: PreTrainedTokenizerBase
    plm_probability: float = 1 / 6
    max_span_length: int = 5  # maximum length of a span of masked tokens
    return_tensors: str = "pt"

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _torch_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _tf_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _numpy_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

        0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
        1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
        2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
           masked
        3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
           span_length]` and mask tokens `start_index:start_index + span_length`
        4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
           sequence to be processed), repeat from Step 1.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.size(1) % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = inputs.clone()
        # Creating the mask and target_mapping tensors
        masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
        target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            cur_len = 0
            max_len = labels.size(1)

            while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
                span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
                # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
                context_length = int(span_length / self.plm_probability)
                # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
                masked_indices[i, start_index : start_index + span_length] = 1
                # Set `cur_len = cur_len + context_length`
                cur_len += context_length

            # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
            target_mapping[i] = torch.eye(labels.size(1))

        special_tokens_mask = torch.tensor(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=torch.bool,
        )
        masked_indices.masked_fill_(special_tokens_mask, value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            masked_indices.masked_fill_(padding_mask, value=0.0)

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence. This will
            # determine which tokens a given token can attend to (encoded in `perm_mask`).
            # Note: The length of the token sequence being permuted has to be less than or equal to the reused sequence length
            # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
            # we assume that the reused length is half of the sequence length and the permutation length is equal to the reused length.
            # This requires that the sequence length be even.

            # Create a linear factorisation order
            perm_index = torch.arange(labels.size(1))
            # Split this into two halves, assuming that half the sequence is reused each time
            perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
            # Permute the two halves such that they do not cross over
            perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
            # Flatten this out into the desired permuted factorisation order
            perm_index = torch.flatten(perm_index.transpose(0, 1))
            # Set the permutation indices of non-masked (non-functional) tokens to the
            # smallest index (-1) so that:
            # (1) They can be seen by all other positions
            # (2) They cannot see masked positions, so there won't be information leak
            perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
            # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
            # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
            perm_mask[i] = (
                perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
            ) & masked_indices[i]

        return inputs.long(), perm_mask, target_mapping, labels.long()

    def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

        0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
        1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
        2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
           masked
        3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
           span_length]` and mask tokens `start_index:start_index + span_length`
        4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
           sequence to be processed), repeat from Step 1.
        """
        import tensorflow as tf

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if tf.shape(inputs)[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = tf.identity(inputs)
        # Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
        labels_shape = tf.shape(labels)
        target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)

        for i in range(len(labels)):
            # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            cur_len = 0
            max_len = tf.shape(labels)[1]

            while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
                span_length = randint(1, self.max_span_length + 1)
                # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
                context_length = int(span_length / self.plm_probability)
                # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length + 1)
                masked_indices[i, start_index : start_index + span_length] = 1
                # Set `cur_len = cur_len + context_length`
                cur_len += context_length

            # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
            target_mapping[i] = np.eye(labels_shape[1])
        masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
        target_mapping = tf.convert_to_tensor(target_mapping)
        special_tokens_mask = tf.convert_to_tensor(
            [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                for val in labels.numpy().tolist()
            ],
        )
        special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
        masked_indices = masked_indices & ~special_tokens_mask
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
        labels = tf.where(masked_indices, labels, -100)  # We only compute loss on masked tokens

        perm_mask = []

        for i in range(len(labels)):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence. This will
            # determine which tokens a given token can attend to (encoded in `perm_mask`).
            # Note: The length of the token sequence being permuted has to be less than or equal to the reused sequence length
            # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
            # we assume that the reused length is half of the sequence length and the permutation length is equal to the reused length.
            # This requires that the sequence length be even.

            # Create a linear factorisation order
            # tf.range is the equivalent of torch.arange
            perm_index = tf.range(labels_shape[1])
            # Split this into two halves, assuming that half the sequence is reused each time
            perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
            # Permute the two halves such that they do not cross over
            perm_index = tf.random.shuffle(perm_index)  # Shuffles along the first dimension
            # Flatten this out into the desired permuted factorisation order
            perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
            # Set the permutation indices of non-masked (non-functional) tokens to the
            # smallest index (-1) so that:
            # (1) They can be seen by all other positions
            # (2) They cannot see masked positions, so there won't be information leak
            perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
            # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
            # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
            perm_mask.append(
                (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
                & masked_indices[i]
            )
        perm_mask = tf.stack(perm_mask, axis=0)

        return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)

    def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

        0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
        1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
        2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
           masked
        3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
           span_length]` and mask tokens `start_index:start_index + span_length`
        4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
           sequence to be processed), repeat from Step 1.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.shape[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = np.copy(inputs)
        # Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape, 0, dtype=bool)
        target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            cur_len = 0
            max_len = labels.shape[1]

            while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
                span_length = randint(1, self.max_span_length + 1)
                # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
                context_length = int(span_length / self.plm_probability)
                # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length + 1)
                masked_indices[i, start_index : start_index + span_length] = 1
                # Set `cur_len = cur_len + context_length`
                cur_len += context_length

            # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
            target_mapping[i] = np.eye(labels.shape[1])

        special_tokens_mask = np.array(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=bool,
        )
        masked_indices[special_tokens_mask] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence. This will
            # determine which tokens a given token can attend to (encoded in `perm_mask`).
            # Note: The length of the token sequence being permuted has to be less than or equal to the reused sequence length
            # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
            # we assume that the reused length is half of the sequence length and the permutation length is equal to the reused length.
            # This requires that the sequence length be even.

            # Create a linear factorisation order
            perm_index = np.arange(labels.shape[1])
            # Split this into two halves, assuming that half the sequence is reused each time
            perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
            # Permute the two halves such that they do not cross over
            np.random.shuffle(perm_index)
            # Flatten this out into the desired permuted factorisation order
            perm_index = perm_index.T.flatten()
            # Set the permutation indices of non-masked (non-functional) tokens to the
            # smallest index (-1) so that:
            # (1) They can be seen by all other positions
            # (2) They cannot see masked positions, so there won't be information leak
            perm_index[~masked_indices[i] & non_func_mask[i]] = -1
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
            # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
            # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
            perm_mask[i] = (
                perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
            ) & masked_indices[i]

        return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
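
# A minimal usage sketch for the permutation collator above, assuming an XLNet
# checkpoint; the function name and checkpoint are illustrative, not part of this
# module's API. Each context window of length span_length / plm_probability holds one
# masked span of span_length tokens, so the expected masked fraction is roughly
# plm_probability (1/6 by default).
def _example_plm_collation():  # illustrative only, never called by the library
    from transformers import AutoTokenizer, DataCollatorForPermutationLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")  # assumed checkpoint
    collator = DataCollatorForPermutationLanguageModeling(tokenizer)
    ids = tokenizer("A short example sentence.")["input_ids"]
    if len(ids) % 2:  # the collator requires even sequence lengths
        ids.append(tokenizer.pad_token_id)
    batch = collator([ids])
    print(batch["perm_mask"].shape, batch["target_mapping"].shape)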
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc
ADDED
Binary file (6.37 kB)
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/glue.py
ADDED
@@ -0,0 +1,161 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK: label indices are swapped in the RoBERTa pretrained model
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time, so we should investigate why and how we can improve it.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
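
# A minimal usage sketch for the dataset above, assuming a local GLUE download; the
# function name, paths, and checkpoint are illustrative, not part of this module's API.
# GlueDataTrainingArguments is designed to be parsed from the command line via
# HfArgumentParser.
def _example_glue_dataset():  # illustrative only, never called by the library
    from transformers import AutoTokenizer, HfArgumentParser

    parser = HfArgumentParser(GlueDataTrainingArguments)
    (data_args,) = parser.parse_args_into_dataclasses(
        args=["--task_name", "mrpc", "--data_dir", "./glue_data/MRPC"]  # assumed paths
    )
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    dataset = GlueDataset(data_args, tokenizer, mode="train")  # deprecated; prefer 🤗 Datasets
    print(len(dataset), dataset.get_labels())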
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py
ADDED
@@ -0,0 +1,530 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import pickle
import random
import time
import warnings
from typing import Dict, List, Optional

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


DEPRECATION_WARNING = (
    "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
    "library. You can have a look at this example script for pointers: {0}"
)


class TextDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")

        block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)

        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else directory,
            f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.examples = []
                with open(file_path, encoding="utf-8") as f:
                    text = f.read()

                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

                for i in range(0, len(tokenized_text) - block_size + 1, block_size):  # Truncate in blocks of block_size
                    self.examples.append(
                        tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
                    )
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding).
                # If your dataset is small, first you should look for a bigger one :-) and second you
                # can change this behavior by adding (model-specific) padding.

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        return torch.tensor(self.examples[i], dtype=torch.long)

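
# A minimal sketch of the block slicing done by TextDataset above; the file, function
# name, and checkpoint are hypothetical, not part of this module's API. Tokens are
# chunked into non-overlapping blocks and the trailing remainder shorter than
# block_size is dropped.
def _example_text_dataset_blocks():  # illustrative only, never called by the library
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    with open("tiny.txt", "w") as f:  # hypothetical file
        f.write("one two three four five six seven eight nine ten")
    ds = TextDataset(tokenizer, file_path="tiny.txt", block_size=8)
    print(len(ds), ds[0])
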
class LineByLineTextDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")

        with open(file_path, encoding="utf-8") as f:
            lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]

        batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = batch_encoding["input_ids"]
        self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]

class LineByLineWithRefDataset(Dataset):
|
147 |
+
"""
|
148 |
+
This will be superseded by a framework-agnostic approach soon.
|
149 |
+
"""
|
150 |
+
|
151 |
+
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
|
152 |
+
warnings.warn(
|
153 |
+
DEPRECATION_WARNING.format(
|
154 |
+
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
|
155 |
+
),
|
156 |
+
FutureWarning,
|
157 |
+
)
|
158 |
+
if os.path.isfile(file_path) is False:
|
159 |
+
raise ValueError(f"Input file path {file_path} not found")
|
160 |
+
if os.path.isfile(ref_path) is False:
|
161 |
+
raise ValueError(f"Ref file path {file_path} not found")
|
162 |
+
# Here, we do not cache the features, operating under the assumption
|
163 |
+
# that we will soon use fast multithreaded tokenizers from the
|
164 |
+
# `tokenizers` repo everywhere =)
|
165 |
+
logger.info(f"Creating features from dataset file at {file_path}")
|
166 |
+
logger.info(f"Use ref segment results at {ref_path}")
|
167 |
+
with open(file_path, encoding="utf-8") as f:
|
168 |
+
data = f.readlines() # use this method to avoid delimiter '\u2029' to split a line
|
169 |
+
data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
|
170 |
+
# Get ref inf from file
|
171 |
+
with open(ref_path, encoding="utf-8") as f:
|
172 |
+
ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
|
173 |
+
if len(data) != len(ref):
|
174 |
+
raise ValueError(
|
175 |
+
f"Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} "
|
176 |
+
f"while length of {ref_path} is {len(ref)}"
|
177 |
+
)
|
178 |
+
|
179 |
+
batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
|
180 |
+
self.examples = batch_encoding["input_ids"]
|
181 |
+
self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
|
182 |
+
|
183 |
+
n = len(self.examples)
|
184 |
+
for i in range(n):
|
185 |
+
self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)
|
186 |
+
|
187 |
+
def __len__(self):
|
188 |
+
return len(self.examples)
|
189 |
+
|
190 |
+
def __getitem__(self, i) -> Dict[str, torch.tensor]:
|
191 |
+
return self.examples[i]
|
192 |
+
|
193 |
+
|
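
# Data layout sketch (illustrative assumptions; file names are hypothetical): the ref
# file holds one JSON list per non-empty line of the text file, marking token positions
# that continue a word so a whole-word-masking collator can mask them together:
#
#     train.txt   one sentence per line
#     train.ref   e.g. [2, 3, 6] on line i -> tokens 2, 3 and 6 of sentence i are
#                 word continuations (stored as "chinese_ref" in each example)
#
#     dataset = LineByLineWithRefDataset(tokenizer=tokenizer, file_path="train.txt",
#                                        block_size=128, ref_path="train.ref")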


class LineByLineWithSOPTextDataset(Dataset):
    """
    Dataset for the sentence order prediction task: prepares sentence pairs for the SOP task.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isdir(file_dir) is False:
            raise ValueError(f"{file_dir} is not a directory")
        logger.info(f"Creating features from dataset file folder at {file_dir}")
        self.examples = []
        # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed)
        # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
        for file_name in os.listdir(file_dir):
            file_path = os.path.join(file_dir, file_name)
            if os.path.isfile(file_path) is False:
                raise ValueError(f"{file_path} is not a file")
            article_open = False
            with open(file_path, encoding="utf-8") as f:
                original_lines = f.readlines()
                article_lines = []
                for line in original_lines:
                    if "<doc id=" in line:
                        article_open = True
                    elif "</doc>" in line:
                        article_open = False
                        document = [
                            tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
                            for line in article_lines[1:]
                            if (len(line) > 0 and not line.isspace())
                        ]

                        examples = self.create_examples_from_document(document, block_size, tokenizer)
                        self.examples.extend(examples)
                        article_lines = []
                    else:
                        if article_open:
                            article_lines.append(line)

        logger.info("Dataset parse finished.")

    def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
        """Creates examples for a single document."""

        # Account for special tokens
        max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < short_seq_prob:
            target_seq_length = random.randint(2, max_num_tokens)

        # We DON'T just concatenate all of the tokens from a document into a long
        # sequence and choose an arbitrary split point because this would make the
        # next sentence prediction task too easy. Instead, we split the input into
        # segments "A" and "B" based on the actual "sentences" provided by the user
        # input.
        examples = []
        current_chunk = []  # a buffer storing the current working segments
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]  # get a segment
            if not segment:
                i += 1
                continue
            current_chunk.append(segment)  # add a segment to the current chunk
            current_length += len(segment)  # overall token length
            # once the current length reaches the target length, or we hit the end of the
            # document, start building tokens a and b
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
                    a_end = 1
                    # if the current chunk has two or more segments, randomly pick how many go into `A`
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)
                    # tokens a
                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    # tokens b
                    tokens_b = []
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])

                    if len(tokens_a) == 0 or len(tokens_b) == 0:
                        # skip degenerate pairs, but still reset and advance so the loop cannot stall
                        current_chunk = []
                        current_length = 0
                        i += 1
                        continue

                    # switch tokens_a and tokens_b randomly
                    if random.random() < 0.5:
                        is_next = False
                        tokens_a, tokens_b = tokens_b, tokens_a
                    else:
                        is_next = True

                    def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
                        """Truncates a pair of sequences to a maximum sequence length."""
                        while True:
                            total_length = len(tokens_a) + len(tokens_b)
                            if total_length <= max_num_tokens:
                                break
                            trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
                            if not (len(trunc_tokens) >= 1):
                                raise ValueError("Sequence length to be truncated must be no less than one")
                            # We want to sometimes truncate from the front and sometimes from the
                            # back to add more randomness and avoid biases.
                            if random.random() < 0.5:
                                del trunc_tokens[0]
                            else:
                                trunc_tokens.pop()

                    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
                    if not (len(tokens_a) >= 1):
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if not (len(tokens_b) >= 1):
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
                    }
                    examples.append(example)
                current_chunk = []  # clear the current chunk
                current_length = 0  # reset the current text length
            i += 1  # go to the next line
        return examples

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return self.examples[i]
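
# Usage sketch (illustrative; expects a folder of WikiExtractor-style files in which
# each article sits between "<doc id=...>" and "</doc>" markers):
#
#     dataset = LineByLineWithSOPTextDataset(tokenizer=tokenizer, file_dir="./dataset", block_size=128)
#     example = dataset[0]
#     # example["input_ids"]            [CLS] A [SEP] B [SEP] token ids
#     # example["token_type_ids"]       0 for segment A, 1 for segment B
#     # example["sentence_order_label"] 0 if (A, B) is in order, 1 if the pair was swapped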


class TextDatasetForNextSentencePrediction(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        short_seq_probability=0.1,
        nsp_probability=0.5,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")

        self.short_seq_probability = short_seq_probability
        self.nsp_probability = nsp_probability

        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(
            directory,
            f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        self.tokenizer = tokenizer

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"

        # Input file format:
        # (1) One sentence per line. These should ideally be actual sentences, not
        # entire paragraphs or arbitrary spans of text. (Because we use the
        # sentence boundaries for the "next sentence prediction" task).
        # (2) Blank lines between documents. Document boundaries are needed so
        # that the "next sentence prediction" task doesn't span between documents.
        #
        # Example:
        # I am very happy.
        # Here is the second sentence.
        #
        # A new document.

        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.documents = [[]]
                with open(file_path, encoding="utf-8") as f:
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        line = line.strip()

                        # Empty lines are used as document delimiters
                        if not line and len(self.documents[-1]) != 0:
                            self.documents.append([])
                        tokens = tokenizer.tokenize(line)
                        tokens = tokenizer.convert_tokens_to_ids(tokens)
                        if tokens:
                            self.documents[-1].append(tokens)

                logger.info(f"Creating examples from {len(self.documents)} documents.")
                self.examples = []
                for doc_index, document in enumerate(self.documents):
                    self.create_examples_from_document(document, doc_index, block_size)

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
        """Creates examples for a single document."""

        max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_probability:
            target_seq_length = random.randint(2, max_num_tokens)

        current_chunk = []  # a buffer storing the current working segments
        current_length = 0
        i = 0

        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A`
                    # (first) sentence.
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)

                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    tokens_b = []

                    if len(current_chunk) == 1 or random.random() < self.nsp_probability:
                        is_random_next = True
                        target_b_length = target_seq_length - len(tokens_a)

                        # This should rarely go for more than one iteration for large
                        # corpora. However, just to be careful, we try to make sure that
                        # the random document is not the same as the document
                        # we're processing.
                        for _ in range(10):
                            random_document_index = random.randint(0, len(self.documents) - 1)
                            if random_document_index != doc_index:
                                break

                        random_document = self.documents[random_document_index]
                        random_start = random.randint(0, len(random_document) - 1)
                        for j in range(random_start, len(random_document)):
                            tokens_b.extend(random_document[j])
                            if len(tokens_b) >= target_b_length:
                                break
                        # We didn't actually use these segments so we "put them back" so
                        # they don't go to waste.
                        num_unused_segments = len(current_chunk) - a_end
                        i -= num_unused_segments
                    # Actual next
                    else:
                        is_random_next = False
                        for j in range(a_end, len(current_chunk)):
                            tokens_b.extend(current_chunk[j])

                    if not (len(tokens_a) >= 1):
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if not (len(tokens_b) >= 1):
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
                    }

                    self.examples.append(example)

                current_chunk = []
                current_length = 0

            i += 1

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
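
A minimal end-to-end sketch of how TextDatasetForNextSentencePrediction was meant to be consumed, mirroring the legacy pretraining flow these deprecated classes come from (the corpus path and checkpoint are placeholders): DataCollatorForLanguageModeling adds MLM labels on top of the NSP pairs, which is the input signature BertForPreTraining expects.

from transformers import (
    BertForPreTraining,
    BertTokenizer,
    DataCollatorForLanguageModeling,
    TextDatasetForNextSentencePrediction,
    Trainer,
    TrainingArguments,
)

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForPreTraining.from_pretrained("bert-base-uncased")
# one sentence per line, blank lines between documents (see the format comment above)
dataset = TextDatasetForNextSentencePrediction(tokenizer=tokenizer, file_path="corpus.txt", block_size=128)
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="nsp-out"),
    train_dataset=dataset,
    data_collator=collator,
)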
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/squad.py
ADDED
@@ -0,0 +1,229 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to the data we are going to input to our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
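
# Usage sketch (illustrative): because this is a dataclass of `field`s with help metadata,
# it can be filled from the command line with HfArgumentParser, as the legacy SQuAD example
# scripts do. The argument values below are placeholders:
#
#     parser = HfArgumentParser(SquadDataTrainingArguments)
#     (data_args,) = parser.parse_args_into_dataclasses(
#         args=["--model_type", "bert", "--data_dir", "./squad"]
#     )
#     assert data_args.n_best_size == 20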


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " a future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
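
A short usage sketch for SquadDataset (the checkpoint and paths are placeholders; data_dir is assumed to contain the SQuAD train/dev JSON files the processors look for):

from transformers import AutoTokenizer
from transformers.data.datasets.squad import SquadDataset, SquadDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
item = train_dataset[0]  # dict with input_ids, attention_mask, start_positions, end_positions, ...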
llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h
ADDED
@@ -0,0 +1,61 @@
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/

#pragma once

#include "cpu/ms_deform_attn_cpu.h"

#ifdef WITH_CUDA
#include "cuda/ms_deform_attn_cuda.h"
#endif


at::Tensor
ms_deform_attn_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    if (value.type().is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_forward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
#else
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}

std::vector<at::Tensor>
ms_deform_attn_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    if (value.type().is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_backward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
#else
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}
llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/vision.cpp
ADDED
@@ -0,0 +1,16 @@
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/

#include "ms_deform_attn.h"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
  m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
}
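
For reference, a hedged sketch of building and calling this extension by hand with torch.utils.cpp_extension.load (transformers compiles these sources lazily when deformable attention runs on CUDA; the relative paths and the commented call are illustrative):

from torch.utils.cpp_extension import load

ms_deform_attn = load(
    name="MultiScaleDeformableAttention",
    sources=["vision.cpp", "cpu/ms_deform_attn_cpu.cpp", "cuda/ms_deform_attn_cuda.cu"],
    with_cuda=True,
    extra_cflags=["-DWITH_CUDA=1"],
    extra_cuda_cflags=["-DWITH_CUDA=1"],
)
# output = ms_deform_attn.ms_deform_attn_forward(
#     value, spatial_shapes, level_start_index, sampling_locations, attention_weights, im2col_step
# )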
llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu
ADDED
@@ -0,0 +1,186 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
#define MIN_VALUE (-1e38)
typedef at::BFloat16 bf16;

__global__ void kernel_forward_bf16(
    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int _b = idx / C;
    const int _c = idx % C;
    const int _offset = _b * T * C + _c;

    float u = float(_u[_c]);
    float w = _w[_c];
    const bf16 *__restrict__ const k = _k + _offset;
    const bf16 *__restrict__ const v = _v + _offset;
    bf16 *__restrict__ const y = _y + _offset;

    // aa and bb are running sums divided by exp(pp) (to avoid overflow)
    float aa = 0, bb = 0, pp = MIN_VALUE;
    for (int i = 0; i < T; i++) {
        const int ii = i * C;
        const float kk = float(k[ii]);
        const float vv = float(v[ii]);

        float ww = u + kk;
        float p = max(pp, ww);
        float e1 = exp(pp - p);
        float e2 = exp(ww - p);
        y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));

        ww = w + pp;
        p = max(ww, kk);
        e1 = exp(ww - p);
        e2 = exp(kk - p);
        aa = e1 * aa + e2 * vv;
        bb = e1 * bb + e2;
        pp = p;
    }
}

__global__ void kernel_forward_with_state_bf16(
    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y,
    float *__restrict__ const _s
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int _b = idx / C;
    const int _c = idx % C;
    const int _offset_s = _b * C * 3 + _c * 3;
    const int _offset = _b * T * C + _c;

    float u = float(_u[_c]);
    float w = _w[_c];
    const bf16 *__restrict__ const k = _k + _offset;
    const bf16 *__restrict__ const v = _v + _offset;
    bf16 *__restrict__ const y = _y + _offset;
    float *__restrict__ const s = _s + _offset_s;

    // aa and bb are running sums divided by exp(pp) (to avoid overflow)
    float aa = s[0], bb = s[1], pp = s[2];
    for (int i = 0; i < T; i++) {
        const int ii = i * C;
        const float kk = float(k[ii]);
        const float vv = float(v[ii]);

        float ww = u + kk;
        float p = max(pp, ww);
        float e1 = exp(pp - p);
        float e2 = exp(ww - p);
        // divide in float before rounding to bf16, matching kernel_forward_bf16
        y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));

        ww = w + pp;
        p = max(ww, kk);
        e1 = exp(ww - p);
        e2 = exp(kk - p);
        aa = e1 * aa + e2 * vv;
        bb = e1 * bb + e2;
        pp = p;
    }
    s[0] = aa;
    s[1] = bb;
    s[2] = pp;
}

__global__ void kernel_backward_bf16(
    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, const bf16 *__restrict__ const _y,
    const bf16 *__restrict__ const _gy, bf16 *__restrict__ const _gw, bf16 *__restrict__ const _gu,
    bf16 *__restrict__ const _gk, bf16 *__restrict__ const _gv
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int _b = idx / C;
    const int _c = idx % C;
    const int _offset = _b * T * C + _c;

    float u = float(_u[_c]);
    float w = _w[_c];
    const bf16 *__restrict__ const k = _k + _offset;
    const bf16 *__restrict__ const v = _v + _offset;
    const bf16 *__restrict__ const y = _y + _offset;
    const bf16 *__restrict__ const gy = _gy + _offset;
    bf16 *__restrict__ const gk = _gk + _offset;
    bf16 *__restrict__ const gv = _gv + _offset;

    float q[Tmax], r[Tmax];

    float gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE;
    for (int i = 0; i < T; i++) {
        const int ii = i * C;
        const float kk = float(k[ii]);
        const float vv = float(v[ii]);
        const float yy = float(y[ii]);

        float ww = u + kk;
        float p = max(pp, ww);
        float e1 = exp(pp - p);
        float e2 = exp(ww - p);
        const float qq = float(gy[ii]) / (e1 * bb + e2);
        gw += (ga - gb * yy) * e1 * qq;
        gu += (vv - yy) * e2 * qq;
        q[i] = qq;
        r[i] = ww - p;

        ww = w + pp;
        p = max(ww, kk);
        e1 = exp(ww - p);
        e2 = exp(kk - p);
        ga = e1 * (aa + ga);
        gb = e1 * (bb + gb);
        aa = e1 * aa + e2 * vv;
        bb = e1 * bb + e2;
        pp = p;
    }
    const int _offsetBC = _b * C + _c;
    _gw[_offsetBC] = bf16(gw * _w[_c]); // multiply by w because of w -> -exp(w) in python forward()
    _gu[_offsetBC] = bf16(gu);

    aa = 0, bb = 0, pp = MIN_VALUE;
    for (int i = T - 1; i >= 0; i--) {
        const int ii = i * C;
        const float kk = float(k[ii]);
        const float vv = float(v[ii]);
        const float yy = float(y[ii]);
        const float qq = q[i];
        const float rr = r[i];

        float e1 = qq * exp(rr);
        float e2 = exp(kk + pp);
        gk[ii] = bf16(e1 * (vv - yy) + e2 * (aa * vv + bb));
        gv[ii] = bf16(e1 + e2 * aa);

        const float ww = w + pp;
        const float www = rr - u - kk;
        const float p = max(ww, www);
        e1 = exp(ww - p);
        e2 = qq * exp(www - p);
        aa = e1 * aa + e2;
        bb = e1 * bb - e2 * yy;
        pp = p;
    }
}

void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y) {
    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
    assert(B * C % threadsPerBlock.x == 0);
    dim3 numBlocks(B * C / threadsPerBlock.x);
    kernel_forward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
}

void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s) {
    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
    assert(B * C % threadsPerBlock.x == 0);
    dim3 numBlocks(B * C / threadsPerBlock.x);
    kernel_forward_with_state_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, s);
}

void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv) {
    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
    assert(B * C % threadsPerBlock.x == 0);
    dim3 numBlocks(B * C / threadsPerBlock.x);
    kernel_backward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
}
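
The forward kernel is a numerically stabilized linear-attention recurrence: it tracks a running numerator aa and denominator bb, both scaled by exp(pp) so the exponentials never overflow. A plain-Python transcription of one (batch, channel) slice, offered as an illustrative reference rather than anything shipped with the library:

import math

def wkv_forward_reference(w, u, k, v):
    # w: per-channel decay (already transformed upstream, see the gw comment in the
    # backward kernel), u: per-channel bonus, k/v: length-T lists of floats.
    T = len(k)
    y = [0.0] * T
    aa, bb, pp = 0.0, 0.0, -1e38  # running sums, scaled by exp(pp) to avoid overflow
    for i in range(T):
        ww = u + k[i]
        p = max(pp, ww)
        e1, e2 = math.exp(pp - p), math.exp(ww - p)
        y[i] = (e1 * aa + e2 * v[i]) / (e1 * bb + e2)

        ww = w + pp
        p = max(ww, k[i])
        e1, e2 = math.exp(ww - p), math.exp(k[i] - p)
        aa, bb, pp = e1 * aa + e2 * v[i], e1 * bb + e2, p
    return y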
llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp
ADDED
@@ -0,0 +1,66 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y);
void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y);
void cuda_forward_with_state(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *s);
void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s);
void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv);
void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv);

void forward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_forward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>());
}
void forward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_forward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void forward_with_state(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_forward_with_state(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), s.data_ptr<float>());
}
void forward_with_state_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_forward_with_state_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(), s.data_ptr<float>());
}
void backward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_backward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), gy.data_ptr<float>(), gw.data_ptr<float>(), gu.data_ptr<float>(), gk.data_ptr<float>(), gv.data_ptr<float>());
}
void backward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
    const int B = k.size(0);
    const int T = k.size(1);
    const int C = k.size(2);
    cuda_backward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(),
                       gy.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>());
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "wkv forward");
    m.def("forward_bf16", &forward_bf16, "wkv forward bf16");
    m.def("forward_with_state", &forward_with_state, "wkv forward with state");
    m.def("forward_with_state_bf16", &forward_with_state_bf16, "wkv forward with state bf16");
    m.def("backward", &backward, "wkv backward");
    m.def("backward_bf16", &backward_bf16, "wkv backward bf16");
}

TORCH_LIBRARY(wkv, m) {
    m.def("forward", forward);
    m.def("forward_bf16", forward_bf16);
    m.def("forward_with_state", forward_with_state);
    m.def("forward_with_state_bf16", forward_with_state_bf16);
    m.def("backward", backward);
    m.def("backward_bf16", backward_bf16);
}
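
A hedged sketch of compiling and calling the op by hand with torch.utils.cpp_extension.load (transformers performs an equivalent lazy build for its RWKV model; the float32 kernel source name and the Tmax value are illustrative. Tmax must bound the sequence length, because the backward kernel allocates q[Tmax] and r[Tmax] per thread):

import torch
from torch.utils.cpp_extension import load

wkv = load(
    name="wkv",
    sources=["wkv_op.cpp", "wkv_cuda.cu", "wkv_cuda_bf16.cu"],
    extra_cuda_cflags=["-O3", "--maxrregcount=60", "-DTmax=1024"],
)

B, T, C = 1, 16, 64  # B * C must be a multiple of 32 (see the asserts in the launchers)
w = torch.randn(C, device="cuda")
u = torch.randn(C, device="cuda")
k = torch.randn(B, T, C, device="cuda")
v = torch.randn(B, T, C, device="cuda")
y = torch.empty(B, T, C, device="cuda")
wkv.forward(w, u, k, v, y)  # fills y in place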
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common.h
ADDED
@@ -0,0 +1,10 @@
#define min(a, b) ((a)<(b)?(a):(b))
#define max(a, b) ((a)>(b)?(a):(b))
#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
#define select(cond, a, b) ((cond)?(a):(b))
#define PI 3.141592
#define EPSILON 1e-8
#define MAX_VAL 1e12
#define MIN_VAL -1e12
#define EMPTY_VALUE -1
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h
ADDED
@@ -0,0 +1,9 @@
#define MAX_THREADS_PER_BLOCK 1024
#define OPTIMAL_THREADS_PER_BLOCK 256
#define WARP_SIZE 32
#define MAX_NUM_BLOCK_X 2147483647
#define MAX_NUM_BLOCK_Y 65535
#define MAX_NUM_BLOCK_Z 65535
#define MAX_SHARED_MEM_PER_BLOCK 48000
#define FULL_MASK 0xffffffff
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h
ADDED
@@ -0,0 +1,79 @@
#include "common.h"

template<typename T>
__device__ int set_insert(T *set, int set_size, T value) {
  int slot = value % set_size;
  int start_slot = slot;
  while (true) {
    T prev = atomicCAS(&set[slot], EMPTY_VALUE, value);
    if (prev == EMPTY_VALUE || prev == value) {
      return slot;
    }
    slot = (slot + 1) % set_size;
    if (slot == start_slot) {
      return -1;
    }
  }
  return -1;
}

template<typename T>
__device__ int set_lookup(T *set, int set_size, T value) {
  int slot = value % set_size;
  int start_slot = slot;
  while (true) {
    if (set[slot] == value) {
      return slot;
    }
    slot = (slot + 1) % set_size;
    if (slot == start_slot) {
      return -1;
    }
  }
  return -1;
}

template<typename T>
__device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
  __syncthreads();
  for (int i = 0; i < buffer_size; i = i + num_threads) {
    int offset_idx = i + thread_id;
    if (offset_idx < buffer_size) {
      buffer[offset_idx] = init_value;
    }
  }
  __syncthreads();
}

template<typename T>
__device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
  __syncthreads();
  for (int i = 0; i < data_length; i = i + num_threads) {
    int offset_idx = i + thread_id;
    if (offset_idx < data_length) {
      dist_pt[offset_idx] = src_pt[offset_idx];
    }
  }
  __syncthreads();
}

template<typename T>
__device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
  for (int i = 0; i < buffer_size; i = i + num_threads) {
    int offset_idx = i + thread_id;
    if (offset_idx < buffer_size) {
      buffer[offset_idx] = init_value;
    }
  }
}

template<typename T>
__device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
  for (int i = 0; i < data_length; i = i + num_threads) {
    int offset_idx = i + thread_id;
    if (offset_idx < data_length) {
      dist_pt[offset_idx] = src_pt[offset_idx];
    }
  }
}
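
set_insert and set_lookup implement a lock-free open-addressing hash set: hash to value % set_size, then probe linearly until an empty slot (claimed via atomicCAS), a duplicate, or a full wrap-around. A single-threaded Python model of the same probing logic (the atomic compare-and-swap degenerates to a plain check here):

EMPTY_VALUE = -1

def set_insert(table, value):
    size = len(table)
    slot = value % size
    start_slot = slot
    while True:
        if table[slot] in (EMPTY_VALUE, value):  # single-threaded stand-in for atomicCAS
            table[slot] = value
            return slot
        slot = (slot + 1) % size  # linear probing
        if slot == start_slot:
            return -1  # table is full

table = [EMPTY_VALUE] * 8
assert set_insert(table, 10) == 2  # 10 % 8 == 2
assert set_insert(table, 18) == 3  # hashes to 2, probes forward to 3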
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu
ADDED
@@ -0,0 +1,588 @@
// File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation.cu

#include <torch/extension.h>
#include <ATen/ATen.h>
#include "fast_lsh_cumulation.h"
#include "fast_lsh_cumulation_cuda.h"
#include "common_cuda.h"
#include "common.h"
#include <vector>
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////

std::vector<at::Tensor> fast_hash_ver1_kernel(
  at::Tensor query_mask,
  at::Tensor query_vector,
  at::Tensor key_mask,
  at::Tensor key_vector,
  int num_hash_f,
  int hash_code_len,
  bool use_cuda
) {

  int batch_size = query_vector.size(0);
  int num_query = query_vector.size(1);
  int num_key = key_vector.size(1);
  int vector_dim = query_vector.size(2);

  int num_hash_per_part = vector_dim / hash_code_len;
  int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part));

  at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1;
  at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options());
  at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options());

  int *query_mask_ptr = query_mask.data_ptr<int>();
  float *query_vector_ptr = query_vector.data_ptr<float>();
  int *key_mask_ptr = key_mask.data_ptr<int>();
  float *key_vector_ptr = key_vector.data_ptr<float>();

  int *Dmat_ptr = Dmat.data_ptr<int>();

  int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
  int *key_hash_code_ptr = key_hash_code.data_ptr<int>();

  if (use_cuda) {
    {
      dim3 threads(vector_dim);
      dim3 blocks(num_part, num_query, batch_size);
      int shared_mem = vector_dim * sizeof(float);
      fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_mask_ptr,
        query_vector_ptr,
        Dmat_ptr,
        query_hash_code_ptr,
        batch_size,
        num_query,
        vector_dim,
        num_part,
        num_hash_f,
        hash_code_len
      );
    }
    {
      dim3 threads(vector_dim);
      dim3 blocks(num_part, num_key, batch_size);
      int shared_mem = vector_dim * sizeof(float);
      fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
        key_mask_ptr,
        key_vector_ptr,
        Dmat_ptr,
        key_hash_code_ptr,
        batch_size,
        num_key,
        vector_dim,
        num_part,
        num_hash_f,
        hash_code_len
      );
    }
  }

  return {query_hash_code, key_hash_code};

}
86 |
+
at::Tensor lsh_cumulation_ver1_kernel(
|
87 |
+
at::Tensor query_mask,
|
88 |
+
at::Tensor query_hash_code,
|
89 |
+
at::Tensor key_mask,
|
90 |
+
at::Tensor key_hash_code,
|
91 |
+
at::Tensor value,
|
92 |
+
int hashtable_capacity,
|
93 |
+
bool use_cuda
|
94 |
+
) {
|
95 |
+
|
96 |
+
int batch_size = query_hash_code.size(0);
|
97 |
+
int num_hash_f = query_hash_code.size(2);
|
98 |
+
|
99 |
+
int num_query = query_hash_code.size(1);
|
100 |
+
int num_key = key_hash_code.size(1);
|
101 |
+
int value_dim = value.size(2);
|
102 |
+
|
103 |
+
at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
|
104 |
+
at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
|
105 |
+
|
106 |
+
if (use_cuda) {
|
107 |
+
int threads_x = WARP_SIZE;
|
108 |
+
int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
|
109 |
+
int block_x_step1 = num_key / threads_y;
|
110 |
+
int block_x_step2 = num_query / threads_y;
|
111 |
+
int block_y = batch_size;
|
112 |
+
|
113 |
+
dim3 threads(threads_x, threads_y);
|
114 |
+
dim3 blocks_step1(block_x_step1, block_y);
|
115 |
+
dim3 blocks_step2(block_x_step2, block_y);
|
116 |
+
|
117 |
+
int *query_mask_ptr = query_mask.data_ptr<int>();
|
118 |
+
int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
|
119 |
+
int *key_mask_ptr = key_mask.data_ptr<int>();
|
120 |
+
int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
|
121 |
+
float *value_ptr = value.data_ptr<float>();
|
122 |
+
float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
|
123 |
+
float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
|
124 |
+
|
125 |
+
for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
|
126 |
+
|
127 |
+
cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));
|
128 |
+
|
129 |
+
lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
|
130 |
+
key_mask_ptr,
|
131 |
+
key_hash_code_ptr,
|
132 |
+
value_ptr,
|
133 |
+
hashtable_value_ptr,
|
134 |
+
batch_size,
|
135 |
+
num_hash_f,
|
136 |
+
hashtable_capacity,
|
137 |
+
num_key,
|
138 |
+
value_dim,
|
139 |
+
value_offset
|
140 |
+
);
|
141 |
+
|
142 |
+
lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
|
143 |
+
query_mask_ptr,
|
144 |
+
query_hash_code_ptr,
|
145 |
+
hashtable_value_ptr,
|
146 |
+
cumulation_value_ptr,
|
147 |
+
batch_size,
|
148 |
+
num_hash_f,
|
149 |
+
hashtable_capacity,
|
150 |
+
num_query,
|
151 |
+
value_dim,
|
152 |
+
value_offset
|
153 |
+
);
|
154 |
+
}
|
155 |
+
|
156 |
+
}
|
157 |
+
|
158 |
+
return cumulation_value;
|
159 |
+
|
160 |
+
}
|

at::Tensor lsh_weighted_cumulation_ver1_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {
    int threads_x = WARP_SIZE;
    int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
    int block_x_step1 = num_key / threads_y;
    int block_x_step2 = num_query / threads_y;
    int block_y = batch_size;

    dim3 threads(threads_x, threads_y);
    dim3 blocks_step1(block_x_step1, block_y);
    dim3 blocks_step2(block_x_step2, block_y);

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();
    float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
      for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) {

        cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));

        lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
          key_mask_ptr,
          key_hash_code_ptr,
          key_weight_ptr,
          value_ptr,
          hashtable_value_ptr,
          batch_size,
          num_hash_f,
          hashtable_capacity,
          num_key,
          value_dim,
          weight_dim,
          value_offset,
          weight_idx
        );

        lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
          query_mask_ptr,
          query_hash_code_ptr,
          query_weight_ptr,
          hashtable_value_ptr,
          cumulation_value_ptr,
          batch_size,
          num_hash_f,
          hashtable_capacity,
          num_query,
          value_dim,
          weight_dim,
          value_offset,
          weight_idx
        );
      }
    }

  }

  return cumulation_value;

}

at::Tensor lsh_weighted_cumulation_ver2_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options());
  at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>();
    int *query_info_ptr = query_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      int shared_mem = hashtable_capacity * sizeof(float);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
    }
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
    }
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_query, num_hash_f, batch_size);
      int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float);
      lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_mask_ptr,
        query_info_ptr,
        key_sorted_idxes_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
    }
  }

  return cumulation_value;

}
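
// Note: unlike ver1, lsh_weighted_cumulation_ver2_kernel avoids a dense hash
// table. count_sort_step1 builds per-(batch, hash function) bucket histograms,
// count_sort_step2 converts them into bucket start offsets in shared memory,
// and count_sort_step3 scatters key indices into bucket-sorted order;
// extract_query_info then hands every query the (offset, count) of the keys
// sharing its bucket, so the step2 kernel only visits colliding keys.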

at::Tensor lsh_weighted_cumulation_ver3_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
  at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
    int *key_info_ptr = key_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      int shared_mem = hashtable_capacity * sizeof(float);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
    }
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
    }
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_key, num_hash_f, batch_size);
      int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float);
      lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_sorted_idxes_ptr,
        key_mask_ptr,
        key_info_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
    }
  }

  return cumulation_value;

}

at::Tensor lsh_weighted_cumulation_ver4_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
  at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
    int *key_info_ptr = key_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      int shared_mem = hashtable_capacity * sizeof(float);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
    }
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
    }
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_key, batch_size);
      int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float);
      lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_sorted_idxes_ptr,
        key_mask_ptr,
        key_info_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
    }
  }

  return cumulation_value;

}
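
Nothing in this translation unit is callable from Python on its own; the entry points above still have to be exposed through an extension module (the diff adds a dedicated torch binding file alongside these sources). As a minimal sketch of what such a binding looks like, assuming compilation via PyTorch's C++ extension machinery and with module/function names that are illustrative rather than the ones shipped here:

// Illustrative binding sketch only; the exported names below are assumptions,
// not the ones defined by this diff's binding file.
#include <torch/extension.h>
#include "fast_lsh_cumulation.h"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("lsh_cumulation_ver1", &lsh_cumulation_ver1_kernel, "LSH cumulation (ver1)");
  m.def("lsh_weighted_cumulation_ver1", &lsh_weighted_cumulation_ver1_kernel, "Weighted LSH cumulation (ver1)");
}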
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h
ADDED
@@ -0,0 +1,71 @@
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <vector>

std::vector<at::Tensor> fast_hash_ver1_kernel(
  at::Tensor query_mask,
  at::Tensor query_vector,
  at::Tensor key_mask,
  at::Tensor key_vector,
  int num_hash_f,
  int hash_code_len,
  bool use_cuda
);

at::Tensor lsh_cumulation_ver1_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
);

at::Tensor lsh_weighted_cumulation_ver1_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
);

at::Tensor lsh_weighted_cumulation_ver2_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
);

at::Tensor lsh_weighted_cumulation_ver3_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
);

at::Tensor lsh_weighted_cumulation_ver4_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
);
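
For orientation, a minimal host-side usage sketch of the unweighted entry point is shown below. It is illustrative only and assumes int32 masks/hash codes and float32 values on a CUDA device, with hash codes in [0, hashtable_capacity) as they would come out of fast_hash_ver1_kernel, and hashtable_capacity = 1 << hash_code_len; the shapes follow the comments on the kernels in the .cu files.

// Illustrative usage sketch (not part of this diff). Dtypes and shapes are
// assumptions matching the pointer types and shape comments in the kernels.
#include <torch/torch.h>
#include "fast_lsh_cumulation.h"

at::Tensor demo_lsh_cumulation() {
  const int batch = 1, num_query = 64, num_key = 64, num_hash_f = 32;
  const int value_dim = 64, hash_code_len = 8;
  const int hashtable_capacity = 1 << hash_code_len;
  auto iopt = at::dtype(at::kInt).device(at::kCUDA);
  auto fopt = at::dtype(at::kFloat).device(at::kCUDA);
  at::Tensor query_mask = at::ones({batch, num_query}, iopt);
  at::Tensor key_mask = at::ones({batch, num_key}, iopt);
  at::Tensor query_hash = at::randint(hashtable_capacity, {batch, num_query, num_hash_f}, iopt);
  at::Tensor key_hash = at::randint(hashtable_capacity, {batch, num_key, num_hash_f}, iopt);
  at::Tensor value = at::randn({batch, num_key, value_dim}, fopt);
  // Returns [batch, num_query, value_dim], the bucket-averaged value sums.
  return lsh_cumulation_ver1_kernel(query_mask, query_hash, key_mask, key_hash,
                                    value, hashtable_capacity, /*use_cuda=*/true);
}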
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu
ADDED
@@ -0,0 +1,825 @@
// File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation_cuda.cu

#include "fast_lsh_cumulation_cuda.h"
#include "common_cuda_device.h"
#include "common_cuda.h"
#include "common.h"
#include <stdio.h>
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void fast_hadamard_transform(float *vector_buffer, int vector_dim, int dim_idx) {
  int stride = vector_dim / 2;
  while (stride > (WARP_SIZE / 2)) {
    __syncthreads();
    int sign = 1 - ((dim_idx / stride) % 2) * 2;
    float val1 = vector_buffer[dim_idx];
    float val2 = vector_buffer[dim_idx + sign * stride];
    __syncthreads();
    vector_buffer[dim_idx] = float(sign) * val1 + val2;
    stride = stride / 2;
  }

  float val = vector_buffer[dim_idx];
  #pragma unroll
  for (stride = (WARP_SIZE / 2); stride > 0; stride = stride / 2) {
    int sign = 1 - ((dim_idx / stride) % 2) * 2;
    val = float(sign) * val + __shfl_xor_sync(FULL_MASK, val, stride);
  }
  vector_buffer[dim_idx] = val;
}
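
// Note on fast_hadamard_transform: each pass applies the standard Hadamard
// butterfly at the current stride, combining every element with its stride
// partner (sign is +1 in the first half of a pair block, -1 in the second).
// Strides larger than half a warp go through shared memory, which is why the
// loads are bracketed by __syncthreads(); once the stride fits inside a warp,
// the remaining passes run entirely in registers via __shfl_xor_sync and need
// no block-wide synchronization. For example, with vector_dim = 64 only the
// stride-32 pass touches shared memory; strides 16 down to 1 stay in registers.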

__global__ void fast_hash_ver1_cuda_kernel(
  int *mask,        // [batch_size, num_vector]
  float *vector,    // [batch_size, num_vector, vector_dim]
  int *Dmat,        // [batch_size, 3, num_part, vector_dim]
  int *hash_code,   // [batch_size, num_vector, num_hash_f]
  int batch_size,
  int num_vector,
  int vector_dim,
  int num_part,
  int num_hash_f,
  int hash_code_len
) {

  int batch_idx = blockIdx.z;
  int vector_idx = blockIdx.y;
  int part_idx = blockIdx.x;

  int dim_idx = threadIdx.x;

  int batch_idx__vector_idx = batch_idx * num_vector + vector_idx;
  if (mask[batch_idx__vector_idx] == 0) {
    return;
  }

  extern __shared__ float buffer[];
  float *vector_buffer = buffer;

  vector_buffer[dim_idx] = vector[batch_idx__vector_idx * vector_dim + dim_idx];

  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 0) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 1) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 2) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);

  int num_hash_per_part = vector_dim / hash_code_len;
  if (hash_code_len == 8 || hash_code_len == 16) {
    int code = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
    for (int offset = 1; offset < hash_code_len; offset = offset * 2) {
      code += __shfl_xor_sync(FULL_MASK, code, offset);
    }
    if (dim_idx % hash_code_len == 0) {
      int hash_f_idx = part_idx * num_hash_per_part + dim_idx / hash_code_len;
      if (hash_f_idx < num_hash_f) {
        hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
      }
    }
  } else {
    vector_buffer[dim_idx] = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
    __syncthreads();
    if (dim_idx < num_hash_per_part) {
      int code = 0;
      for (int i = 0; i < hash_code_len; i++) {
        code += vector_buffer[dim_idx * hash_code_len + i];
      }
      int hash_f_idx = part_idx * num_hash_per_part + dim_idx;
      if (hash_f_idx < num_hash_f) {
        hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
      }
    }
  }
}

__global__ void lsh_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,           // [batch_size, num_key]
  int *key_hash_code,      // [batch_size, num_key, num_hash_f]
  float *value,            // [batch_size, num_key, value_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int offset_warp
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
      }
    }
  } else {
    float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
    }
  }

}

__global__ void lsh_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,         // [batch_size, num_query]
  int *query_hash_code,    // [batch_size, num_query, num_hash_f]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  float *cumulation_value, // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int offset_warp
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int query_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = 0;
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
      }
    }
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
  } else {
    float warp_value = 0;
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
    }
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
  }

}

__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,           // [batch_size, num_key]
  int *key_hash_code,      // [batch_size, num_key, num_hash_f]
  float *key_weight,       // [batch_size, num_key, weight_dim]
  float *value,            // [batch_size, num_key, value_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
      }
    }
  } else {
    float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
    }
  }

}

__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,         // [batch_size, num_query]
  int *query_hash_code,    // [batch_size, num_query, num_hash_f]
  float *query_weight,     // [batch_size, num_query, weight_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  float *cumulation_value, // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int query_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = 0;
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
      }
    }
    float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
  } else {
    float warp_value = 0;
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
    }
    float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
  }

}

__global__ void count_sort_step1_cuda_kernel(
  int *key_mask,          // [batch_size, num_key]
  int *key_hash_code,     // [batch_size, num_key, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key
) {

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
  int hash_f_idx = threadIdx.x;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
  atomicAdd(&count_sort_table[(batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code], 1);

}

__global__ void count_sort_step2_cuda_kernel(
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity
) {

  int batch_idx = blockIdx.y;
  int hash_f_idx = blockIdx.x;

  int num_threads = blockDim.x;
  int thread_id = threadIdx.x;

  int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;

  extern __shared__ float buffer[];
  int *table_buffer = (int*)buffer;

  if (thread_id == 0) {
    table_buffer[0] = 0;
  }
  copy_data<int>(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], &table_buffer[1], hashtable_capacity - 1, num_threads, thread_id);

  for (int table_idx_start = 0; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + num_threads) {
    int thread_value = table_buffer[table_idx_start + thread_id];
    int next_thread_value = 0;
    for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
      next_thread_value = __shfl_up_sync(FULL_MASK, thread_value, offset);
      if (thread_id % WARP_SIZE >= offset) {
        thread_value = thread_value + next_thread_value;
      }
    }
    table_buffer[table_idx_start + thread_id] = thread_value;
  }
  __syncthreads();

  if (hashtable_capacity > WARP_SIZE) {
    if (thread_id < WARP_SIZE) {
      for (int table_idx_start = WARP_SIZE; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + WARP_SIZE) {
        table_buffer[table_idx_start + thread_id] += table_buffer[table_idx_start - 1];
      }
    }
  }

  copy_data<int>(table_buffer, &count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], hashtable_capacity, num_threads, thread_id);

}
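
// Note on count_sort_step2_cuda_kernel: the histogram is copied into shared
// memory shifted by one slot (table_buffer[0] = 0), so the scan yields each
// bucket's start offset (an exclusive prefix sum) rather than its end. Within
// every WARP_SIZE-wide segment the inclusive scan runs on register shuffles
// (__shfl_up_sync); segments are then stitched together by the first warp,
// which sequentially adds the running total sitting at each segment boundary.
// count_sort_step3 consumes these start offsets via atomicAdd when scattering.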
383 |
+
|
384 |
+
|
385 |
+
__global__ void count_sort_step3_cuda_kernel(
|
386 |
+
int *key_mask, // [batch_size, num_key]
|
387 |
+
int *key_hash_code, // [batch_size, num_key, num_hash_f]
|
388 |
+
int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
|
389 |
+
int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
|
390 |
+
int batch_size,
|
391 |
+
int num_hash_f,
|
392 |
+
int hashtable_capacity,
|
393 |
+
int num_key
|
394 |
+
) {
|
395 |
+
|
396 |
+
int batch_idx = blockIdx.y;
|
397 |
+
int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
|
398 |
+
int hash_f_idx = threadIdx.x;
|
399 |
+
|
400 |
+
int batch_idx__key_idx = batch_idx * num_key + key_idx;
|
401 |
+
if (key_mask[batch_idx__key_idx] == 0) {
|
402 |
+
return;
|
403 |
+
}
|
404 |
+
|
405 |
+
int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
|
406 |
+
|
407 |
+
int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
|
408 |
+
int sort_idx = atomicAdd(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity + hash_code], 1);
|
409 |
+
key_sorted_idxes[batch_idx__hash_f_idx * num_key + sort_idx] = key_idx;
|
410 |
+
|
411 |
+
}
|
412 |
+
|
413 |
+
__global__ void extract_query_info_cuda_kernel(
|
414 |
+
int *query_mask, // [batch_size, num_query]
|
415 |
+
int *query_hash_code, // [batch_size, num_query, num_hash_f]
|
416 |
+
int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
|
417 |
+
int *query_info, // [batch_size, num_query, 2, num_hash_f]
|
418 |
+
int batch_size,
|
419 |
+
int num_hash_f,
|
420 |
+
int hashtable_capacity,
|
421 |
+
int num_query
|
422 |
+
) {
|
423 |
+
|
424 |
+
int batch_idx = blockIdx.y;
|
425 |
+
int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
|
426 |
+
int hash_f_idx = threadIdx.x;
|
427 |
+
|
428 |
+
int batch_idx__query_idx = batch_idx * num_query + query_idx;
|
429 |
+
if (query_mask[batch_idx__query_idx] == 0) {
|
430 |
+
return;
|
431 |
+
}
|
432 |
+
|
433 |
+
int hash_code = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_idx];
|
434 |
+
int batch_idx__hash_f_idx__hash_code = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code;
|
435 |
+
|
436 |
+
int key_offset = select(hash_code == 0, 0, count_sort_table[batch_idx__hash_f_idx__hash_code - 1]);
|
437 |
+
int key_count = count_sort_table[batch_idx__hash_f_idx__hash_code] - key_offset;
|
438 |
+
|
439 |
+
query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx] = key_offset;
|
440 |
+
query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx] = key_count;
|
441 |
+
|
442 |
+
}
|
443 |
+
|
444 |
+
__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
|
445 |
+
int *query_mask, // [batch_size, num_query]
|
446 |
+
int *query_info, // [batch_size, num_query, 2, num_hash_f]
|
447 |
+
int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
|
448 |
+
float *query_weight, // [batch_size, num_query, weight_dim]
|
449 |
+
float *key_weight, // [batch_size, num_key, weight_dim]
|
450 |
+
float *value, // [batch_size, num_key, value_dim]
|
451 |
+
float *cumulation_value, // [batch_size, num_query, value_dim]
|
452 |
+
int batch_size,
|
453 |
+
int num_hash_f,
|
454 |
+
int num_query,
|
455 |
+
int num_key,
|
456 |
+
int value_dim,
|
457 |
+
int weight_dim
|
458 |
+
) {
|
459 |
+
|
460 |
+
int batch_idx = blockIdx.z;
|
461 |
+
int hash_f_idx = blockIdx.y;
|
462 |
+
int query_idx = blockIdx.x;
|
463 |
+
|
464 |
+
int num_threads = blockDim.y * blockDim.x;
|
465 |
+
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
|
466 |
+
|
467 |
+
int num_warps = blockDim.y;
|
468 |
+
int warp_idx = threadIdx.y;
|
469 |
+
int warp_thread_idx = threadIdx.x;
|
470 |
+
|
471 |
+
int batch_idx__query_idx = batch_idx * num_query + query_idx;
|
472 |
+
if (query_mask[batch_idx__query_idx] == 0) {
|
473 |
+
return;
|
474 |
+
}
|
475 |
+
|
476 |
+
int key_offset = query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx];
|
477 |
+
int key_count = query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx];
|
478 |
+
|
479 |
+
if (key_count == 0) {
|
480 |
+
return;
|
481 |
+
}
|
482 |
+
|
483 |
+
extern __shared__ float buffer[];
|
484 |
+
|
485 |
+
if (key_count == 1) {
|
486 |
+
if (warp_idx == 0) {
|
487 |
+
int key_idx = key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset];
|
488 |
+
int batch_idx__key_idx = batch_idx * num_key + key_idx;
|
489 |
+
float weight = 0;
|
490 |
+
for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
|
491 |
+
int weight_dim_idx = weight_offset + warp_thread_idx;
|
492 |
+
float val = query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
|
493 |
+
#pragma unroll
|
494 |
+
for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
|
495 |
+
val += __shfl_xor_sync(FULL_MASK, val, offset);
|
496 |
+
}
|
497 |
+
weight = weight + val;
|
498 |
+
}
|
499 |
+
weight = weight / float(num_hash_f);
|
500 |
+
for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
|
501 |
+
int value_dim_idx = value_offset + warp_thread_idx;
|
502 |
+
float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
|
503 |
+
atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
|
504 |
+
}
|
505 |
+
}
|
506 |
+
} else {
|
507 |
+
float *weight_buffer = buffer;
|
508 |
+
int *key_idxes_buffer = (int*)&buffer[weight_dim];
|
509 |
+
|
510 |
+
copy_data_nonblocking<float>(&query_weight[batch_idx__query_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
|
511 |
+
|
512 |
+
while (key_count > 0) {
|
513 |
+
int work_size = min(WARP_SIZE, key_count);
|
514 |
+
copy_data_nonblocking<int>(&key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset], key_idxes_buffer, work_size, num_threads, thread_id);
|
515 |
+
__syncthreads();
|
516 |
+
for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
|
517 |
+
int work_idx = work_offset + warp_idx;
|
518 |
+
if (work_idx < key_count) {
|
519 |
+
int key_idx = key_idxes_buffer[work_idx];
|
520 |
+
int batch_idx__key_idx = batch_idx * num_key + key_idx;
|
521 |
+
float weight = 0;
|
522 |
+
for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
|
523 |
+
int weight_dim_idx = weight_offset + warp_thread_idx;
|
524 |
+
float val = weight_buffer[weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
|
525 |
+
#pragma unroll
|
526 |
+
for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
|
527 |
+
val += __shfl_xor_sync(FULL_MASK, val, offset);
|
528 |
+
}
|
529 |
+
weight = weight + val;
|
530 |
+
}
|
531 |
+
weight = weight / float(num_hash_f);
|
532 |
+
for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
|
533 |
+
int value_dim_idx = value_offset + warp_thread_idx;
|
534 |
+
float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
|
535 |
+
atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
|
536 |
+
}
|
537 |
+
}
|
538 |
+
}
|
539 |
+
key_count = key_count - work_size;
|
540 |
+
key_offset = key_offset + work_size;
|
541 |
+
}
|
542 |
+
}
|
543 |
+
|
544 |
+
}
|
545 |
+
|
546 |
+
__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
|
547 |
+
int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
|
548 |
+
int *key_mask, // [batch_size, num_key]
|
549 |
+
int *key_info, // [batch_size, num_key, 2, num_hash_f]
|
550 |
+
float *query_weight, // [batch_size, num_query, weight_dim]
|
551 |
+
float *key_weight, // [batch_size, num_key, weight_dim]
|
552 |
+
float *value, // [batch_size, num_key, value_dim]
|
553 |
+
float *cumulation_value, // [batch_size, num_query, value_dim]
|
554 |
+
int batch_size,
|
555 |
+
int num_hash_f,
|
556 |
+
int num_query,
|
557 |
+
int num_key,
|
558 |
+
int value_dim,
|
559 |
+
int weight_dim
|
560 |
+
) {
|
561 |
+
|
562 |
+
int batch_idx = blockIdx.z;
|
563 |
+
int hash_f_idx = blockIdx.y;
|
564 |
+
int key_idx = blockIdx.x;
|
565 |
+
|
566 |
+
int num_threads = blockDim.y * blockDim.x;
|
567 |
+
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
|
568 |
+
|
569 |
+
int num_warps = blockDim.y;
|
570 |
+
int warp_idx = threadIdx.y;
|
571 |
+
int warp_thread_idx = threadIdx.x;
|
572 |
+
|
573 |
+
int batch_idx__key_idx = batch_idx * num_key + key_idx;
|
574 |
+
if (key_mask[batch_idx__key_idx] == 0) {
|
575 |
+
return;
|
576 |
+
}
|
577 |
+
|
578 |
+
int query_offset = key_info[batch_idx__key_idx * 2 * num_hash_f + hash_f_idx];
|
579 |
+
int query_count = key_info[(batch_idx__key_idx * 2 + 1) * num_hash_f + hash_f_idx];
|
580 |
+
|
581 |
+
if (query_count == 0) {
|
582 |
+
return;
|
583 |
+
}
|
584 |
+
|
585 |
+
extern __shared__ float buffer[];
|
586 |
+
|
587 |
+
if (query_count == 1) {
|
588 |
+
if (warp_idx == 0) {
|
589 |
+
int query_idx = query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset];
|
590 |
+
int batch_idx__query_idx = batch_idx * num_query + query_idx;
|
591 |
+
float weight = 0;
|
592 |
+
for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
|
593 |
+
int weight_dim_idx = weight_offset + warp_thread_idx;
|
594 |
+
float val = key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
|
595 |
+
#pragma unroll
|
596 |
+
for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
|
597 |
+
val += __shfl_xor_sync(FULL_MASK, val, offset);
|
598 |
+
}
|
599 |
+
weight = weight + val;
|
600 |
+
}
|
601 |
+
weight = weight / float(num_hash_f);
|
602 |
+
for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
|
603 |
+
int value_dim_idx = value_offset + warp_thread_idx;
|
604 |
+
float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
|
605 |
+
atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
|
606 |
+
}
|
607 |
+
}
|
608 |
+
} else {
|
609 |
+
float *weight_buffer = buffer;
|
610 |
+
float *value_buffer = &buffer[weight_dim];
|
611 |
+
int *query_idxes_buffer = (int*)&buffer[weight_dim + value_dim];
|
612 |
+
|
613 |
+
copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
|
614 |
+
copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
|
615 |
+
|
616 |
+
while (query_count > 0) {
|
617 |
+
int work_size = min(WARP_SIZE, query_count);
|
618 |
+
copy_data_nonblocking<int>(&query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset], query_idxes_buffer, work_size, num_threads, thread_id);
|
619 |
+
__syncthreads();
|
620 |
+
for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
|
621 |
+
int work_idx = work_offset + warp_idx;
|
622 |
+
if (work_idx < query_count) {
|
623 |
+
int query_idx = query_idxes_buffer[work_idx];
|
624 |
+
int batch_idx__query_idx = batch_idx * num_query + query_idx;
|
625 |
+
float weight = 0;
|
626 |
+
for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
|
627 |
+
int weight_dim_idx = weight_offset + warp_thread_idx;
|
628 |
+
float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
|
629 |
+
#pragma unroll
|
630 |
+
for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
|
631 |
+
val += __shfl_xor_sync(FULL_MASK, val, offset);
|
632 |
+
}
|
633 |
+
weight = weight + val;
|
634 |
+
}
|
635 |
+
weight = weight / float(num_hash_f);
|
636 |
+
for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
|
637 |
+
int value_dim_idx = value_offset + warp_thread_idx;
|
638 |
+
float val = value_buffer[value_dim_idx];
|
639 |
+
atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
|
640 |
+
}
|
641 |
+
}
|
642 |
+
}
|
643 |
+
query_count = query_count - work_size;
|
644 |
+
query_offset = query_offset + work_size;
|
645 |
+
}
|
646 |
+
}
|
647 |
+
|
648 |
+
}
|
649 |
+
|
650 |
+
__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
|
651 |
+
int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
|
652 |
+
int *key_mask, // [batch_size, num_key]
|
653 |
+
int *key_info, // [batch_size, num_key, 2, num_hash_f]
|
654 |
+
float *query_weight, // [batch_size, num_query, weight_dim]
|
655 |
+
float *key_weight, // [batch_size, num_key, weight_dim]
|
656 |
+
float *value, // [batch_size, num_key, value_dim]
|
657 |
+
float *cumulation_value, // [batch_size, num_query, value_dim]
|
658 |
+
int batch_size,
|
659 |
+
int num_hash_f,
|
660 |
+
int num_query,
|
661 |
+
int num_key,
|
662 |
+
int value_dim,
|
663 |
+
int weight_dim
|
664 |
+
) {
|
665 |
+
|
666 |
+
int batch_idx = blockIdx.y;
|
667 |
+
int key_idx = blockIdx.x;
|
668 |
+
|
669 |
+
int num_threads = blockDim.y * blockDim.x;
|
670 |
+
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
|
671 |
+
|
672 |
+
int num_warps = blockDim.y;
|
673 |
+
int warp_idx = threadIdx.y;
|
674 |
+
int warp_thread_idx = threadIdx.x;
|
675 |
+
|
676 |
+
int batch_idx__key_idx = batch_idx * num_key + key_idx;
|
677 |
+
if (key_mask[batch_idx__key_idx] == 0) {
|
678 |
+
return;
|
679 |
+
}
|
680 |
+
|
681 |
+
extern __shared__ float buffer[];
|
682 |
+
float *weight_buffer = buffer;
|
683 |
+
float *value_buffer = &buffer[weight_dim];
|
684 |
+
int *key_info_buffer = (int*)&buffer[weight_dim + value_dim];
|
685 |
+
|
686 |
+
copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
|
687 |
+
copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
|
688 |
+
copy_data_nonblocking<int>(&key_info[batch_idx__key_idx * 2 * num_hash_f], key_info_buffer, 2 * num_hash_f, num_threads, thread_id);
|
689 |
+
|
690 |
+
int *query_offset_buffer = key_info_buffer;
|
691 |
+
int *query_count_buffer = &key_info_buffer[num_hash_f];
|
692 |
+
|
693 |
+
const int hashtable_size = 1024 + OPTIMAL_THREADS_PER_BLOCK;
|
694 |
+
__shared__ int hashtable_query[hashtable_size];
|
695 |
+
__shared__ int hashtable_count[hashtable_size];
|
696 |
+
__shared__ int inserted_query[hashtable_size];
|
697 |
+
__shared__ int query_counter[1];
|
698 |
+
|
699 |
+
int hash_f_idx_base = 0;
|
700 |
+
|
701 |
+
while (true) {
|
702 |
+
|
703 |
+
    init_buffer_nonblocking<int>(EMPTY_VALUE, hashtable_query, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(0, hashtable_count, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(EMPTY_VALUE, inserted_query, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(0, query_counter, 1, num_threads, thread_id);
    __syncthreads();

    while (hash_f_idx_base < num_hash_f) {

      int hash_f_idx = hash_f_idx_base + warp_idx;
      int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;

      int stop_flag = 0;

      int query_offset = query_offset_buffer[hash_f_idx];
      int query_count = query_count_buffer[hash_f_idx];

      while (query_count > 0) {

        int work_size = min(query_count, WARP_SIZE);

        // try inserting query to set and check whether the query is new
        int found_new_query = 0;
        int query_idx = -1;
        if (warp_thread_idx < work_size) {
          query_idx = query_sorted_idxes[batch_idx__hash_f_idx * num_query + query_offset + warp_thread_idx];
          int slot = set_insert<int>(hashtable_query, hashtable_size, query_idx);
          if (slot >= 0) {
            found_new_query = atomicAdd(&hashtable_count[slot], 1) == 0;
          }
        }

        // compute cumulative offset
        int position_offset = found_new_query;
        int next_position_offset = 0;
        #pragma unroll
        for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
          next_position_offset = __shfl_up_sync(FULL_MASK, position_offset, offset);
          if (thread_id % WARP_SIZE >= offset) {
            position_offset = position_offset + next_position_offset;
          }
        }

        // get the inserted query list end index
        int inserted_query_base = 0;
        if (thread_id % WARP_SIZE == WARP_SIZE - 1) {
          inserted_query_base = atomicAdd(query_counter, position_offset);
        }
        inserted_query_base = __shfl_sync(FULL_MASK, inserted_query_base, WARP_SIZE - 1);

        // insert new queries to list
        int insert_idx = inserted_query_base + position_offset - 1;
        if (found_new_query) {
          inserted_query[insert_idx] = query_idx;
        }

        // remove inserted queries from list
        query_offset_buffer[hash_f_idx] += work_size;
        query_count_buffer[hash_f_idx] -= work_size;
        query_offset += work_size;
        query_count -= work_size;

        // if list is almost full, stop inserting
        if (inserted_query_base + OPTIMAL_THREADS_PER_BLOCK > hashtable_size) {
          stop_flag = 1;
          break;
        }

      }

      if (stop_flag) {
        break;
      }

      hash_f_idx_base = hash_f_idx_base + num_warps;

    }

    __syncthreads();

    int num_distint_query = query_counter[0];

    if (num_distint_query > 0) {
      for (int idx_base = 0; idx_base < num_distint_query; idx_base = idx_base + num_warps) {
        int idx = idx_base + warp_idx;
        if (idx < num_distint_query) {
          int query_idx = inserted_query[idx];
          int batch_idx__query_idx = batch_idx * num_query + query_idx;

          int slot = set_lookup<int>(hashtable_query, hashtable_size, query_idx);
          int duplicate_count = hashtable_count[slot];

          float weight = 0;
          for (int weight_idx_base = 0; weight_idx_base < weight_dim; weight_idx_base = weight_idx_base + WARP_SIZE) {
            int weight_dim_idx = weight_idx_base + warp_thread_idx;
            float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
            #pragma unroll
            for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
              val += __shfl_xor_sync(FULL_MASK, val, offset);
            }
            weight = weight + val;
          }

          weight = (float)duplicate_count * weight / float(num_hash_f);

          for (int value_idx_base = 0; value_idx_base < value_dim; value_idx_base = value_idx_base + WARP_SIZE) {
            int value_dim_idx = value_idx_base + warp_thread_idx;
            float val = value_buffer[value_dim_idx];
            atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
          }
        }
      }
    } else {

      // all computation is completed if num_distint_query == 0
      break;

    }

    __syncthreads();

  }

}
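The "compute cumulative offset" block in the kernel above is a warp-level inclusive prefix sum (Hillis-Steele scan) over each lane's found_new_query flag, built from __shfl_up_sync. A minimal Python sketch of the same scan, with a plain list standing in for the 32 warp lanes (illustrative only, not part of the library):

# Hypothetical illustration of the warp-scan used in the kernel above.
WARP_SIZE = 32

def warp_inclusive_scan(flags):
    # flags: one 0/1 value per lane, e.g. found_new_query for each thread.
    offsets = list(flags)
    step = 1
    while step < WARP_SIZE:
        # __shfl_up_sync(mask, v, step): each lane reads the value `step` lanes below it.
        shifted = [offsets[lane - step] if lane >= step else 0 for lane in range(WARP_SIZE)]
        offsets = [a + b for a, b in zip(offsets, shifted)]
        step <<= 1
    return offsets  # offsets[lane] = number of set flags in lanes 0..lane

flags = [1, 0, 1, 1] + [0] * 28
scan = warp_inclusive_scan(flags)
assert scan[3] == 3  # the third new query lands at slot inserted_query_base + 3 - 1

After the scan, lane WARP_SIZE - 1 holds the warp total, which the kernel reserves in query_counter with a single atomicAdd and broadcasts back to all lanes with __shfl_sync.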
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h
ADDED
@@ -0,0 +1,157 @@
__global__ void fast_hash_ver1_cuda_kernel(
  int *mask,        // [batch_size, num_vector]
  float *vector,    // [batch_size, num_vector, vector_dim]
  int *Dmat,        // [3, num_part, vector_dim]
  int *hash_code,   // [batch_size, num_vector, num_hash_f]
  int batch_size,
  int num_vector,
  int vector_dim,
  int num_part,
  int num_hash_f,
  int hash_code_len
);

__global__ void lsh_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,           // [batch_size, num_key]
  int *key_hash_code,      // [batch_size, num_key, num_hash_f]
  float *value,            // [batch_size, num_key, value_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int offset_warp
);

__global__ void lsh_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,          // [batch_size, num_query]
  int *query_hash_code,     // [batch_size, num_query, num_hash_f]
  float *hashtable_value,   // [batch_size, num_hash_f, hashtable_capacity, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int offset_warp
);

__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,           // [batch_size, num_key]
  int *key_hash_code,      // [batch_size, num_key, num_hash_f]
  float *key_weight,       // [batch_size, num_key, weight_dim]
  float *value,            // [batch_size, num_key, value_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
);

__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,          // [batch_size, num_query]
  int *query_hash_code,     // [batch_size, num_query, num_hash_f]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *hashtable_value,   // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
);

__global__ void count_sort_step1_cuda_kernel(
  int *key_mask,          // [batch_size, num_key]
  int *key_hash_code,     // [batch_size, num_key, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key
);

__global__ void count_sort_step2_cuda_kernel(
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity
);

__global__ void count_sort_step3_cuda_kernel(
  int *key_mask,          // [batch_size, num_key]
  int *key_hash_code,     // [batch_size, num_key, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int *key_sorted_idxes,  // [batch_size, num_hash_f, num_key]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key
);

__global__ void extract_query_info_cuda_kernel(
  int *query_mask,        // [batch_size, num_query]
  int *query_hash_code,   // [batch_size, num_query, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int *query_info,        // [batch_size, num_query, 2, num_hash_f]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query
);

__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
  int *query_mask,          // [batch_size, num_query]
  int *query_info,          // [batch_size, num_query, 2, num_hash_f]
  int *key_sorted_idxes,    // [batch_size, num_hash_f, num_key]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
);

__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
  int *query_sorted_idxes,  // [batch_size, num_hash_f, num_query]
  int *key_mask,            // [batch_size, num_key]
  int *key_info,            // [batch_size, num_key, 2, num_hash_f]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
);

__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
  int *query_sorted_idxes,  // [batch_size, num_hash_f, num_query]
  int *key_mask,            // [batch_size, num_key]
  int *key_info,            // [batch_size, num_key, 2, num_hash_f]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
);
llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp
ADDED
@@ -0,0 +1,128 @@
#include <torch/extension.h>
#include <ATen/ATen.h>
#include "fast_lsh_cumulation.h"
#include "common_cuda.h"
#include <vector>

std::vector<at::Tensor> fast_hash(
  at::Tensor query_mask,
  at::Tensor query_vector,
  at::Tensor key_mask,
  at::Tensor key_vector,
  int num_hash_f,
  int hash_code_len,
  bool use_cuda,
  int version
) {
  return fast_hash_ver1_kernel(
    query_mask,
    query_vector,
    key_mask,
    key_vector,
    num_hash_f,
    hash_code_len,
    use_cuda
  );
}

at::Tensor lsh_cumulation(
  at::Tensor query_mask,       // [batch_size, num_query]
  at::Tensor query_hash_code,  // [batch_size, num_query, num_hash_f]
  at::Tensor key_mask,         // [batch_size, num_key]
  at::Tensor key_hash_code,    // [batch_size, num_key, num_hash_f]
  at::Tensor value,            // [batch_size, num_key, value_dim]
  int hashtable_capacity,
  bool use_cuda,
  int version
) {
  return lsh_cumulation_ver1_kernel(
    query_mask,
    query_hash_code,
    key_mask,
    key_hash_code,
    value,
    hashtable_capacity,
    use_cuda
  );
}

at::Tensor lsh_weighted_cumulation(
  at::Tensor query_mask,       // [batch_size, num_query]
  at::Tensor query_hash_code,  // [batch_size, num_query, num_hash_f]
  at::Tensor query_weight,     // [batch_size, num_query, weight_dim]
  at::Tensor key_mask,         // [batch_size, num_key]
  at::Tensor key_hash_code,    // [batch_size, num_key, num_hash_f]
  at::Tensor key_weight,       // [batch_size, num_key, weight_dim]
  at::Tensor value,            // [batch_size, num_key, value_dim]
  int hashtable_capacity,
  bool use_cuda,
  int version
) {
  if (version == 1) {
    return lsh_weighted_cumulation_ver1_kernel(
      query_mask,
      query_hash_code,
      query_weight,
      key_mask,
      key_hash_code,
      key_weight,
      value,
      hashtable_capacity,
      use_cuda
    );
  } else if (version == 2) {
    return lsh_weighted_cumulation_ver2_kernel(
      query_mask,
      query_hash_code,
      query_weight,
      key_mask,
      key_hash_code,
      key_weight,
      value,
      hashtable_capacity,
      use_cuda
    );
  } else if (version == 3) {
    return lsh_weighted_cumulation_ver3_kernel(
      query_mask,
      query_hash_code,
      query_weight,
      key_mask,
      key_hash_code,
      key_weight,
      value,
      hashtable_capacity,
      use_cuda
    );
  } else if (version == 4) {
    return lsh_weighted_cumulation_ver4_kernel(
      query_mask,
      query_hash_code,
      query_weight,
      key_mask,
      key_hash_code,
      key_weight,
      value,
      hashtable_capacity,
      use_cuda
    );
  } else {
    return lsh_weighted_cumulation_ver3_kernel(
      query_mask,
      query_hash_code,
      query_weight,
      key_mask,
      key_hash_code,
      key_weight,
      value,
      hashtable_capacity,
      use_cuda
    );
  }
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("fast_hash", &fast_hash, "Fast Hash (CUDA)");
  m.def("lsh_cumulation", &lsh_cumulation, "LSH Cumulation (CUDA)");
  m.def("lsh_weighted_cumulation", &lsh_weighted_cumulation, "LSH Weighted Cumulation (CUDA)");
}
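The PYBIND11_MODULE block above is what exposes the three C++ dispatch functions to Python. As a rough sketch of how such an extension could be compiled and exercised ad hoc with torch.utils.cpp_extension.load; the load(...) call, the tensor dtypes, and the module variable name here are illustrative guesses, not necessarily how transformers builds these kernels:

# Illustrative only: compile the YOSO sources ad hoc and call the bound functions.
import torch
from torch.utils.cpp_extension import load

lsh_cumulation_ext = load(
    name="fast_lsh_cumulation",
    sources=[
        "fast_lsh_cumulation_torch.cpp",
        "fast_lsh_cumulation.cu",       # host-side wrappers declared in fast_lsh_cumulation.h
        "fast_lsh_cumulation_cuda.cu",  # the kernels shown earlier
    ],
    verbose=True,
)

# The bound signature mirrors lsh_cumulation(...) above.
batch_size, num_query, num_key, num_hash_f, value_dim, capacity = 1, 8, 8, 4, 16, 64
query_mask = torch.ones(batch_size, num_query, dtype=torch.int32, device="cuda")
query_hash = torch.randint(0, capacity, (batch_size, num_query, num_hash_f), dtype=torch.int32, device="cuda")
key_mask = torch.ones(batch_size, num_key, dtype=torch.int32, device="cuda")
key_hash = torch.randint(0, capacity, (batch_size, num_key, num_hash_f), dtype=torch.int32, device="cuda")
value = torch.randn(batch_size, num_key, value_dim, device="cuda")

out = lsh_cumulation_ext.lsh_cumulation(
    query_mask, query_hash, key_mask, key_hash, value, capacity, True, 1
)
print(out.shape)  # expected: (batch_size, num_query, value_dim)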
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__init__.py
ADDED
@@ -0,0 +1,60 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
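The sys.modules replacement at the bottom swaps this package for a _LazyModule, so heavy submodules (and torch itself) are only imported when one of the names in _import_structure is first accessed. A minimal generic sketch of that pattern, not transformers' actual _LazyModule:

# Generic sketch of the lazy-module idea used above (illustrative only).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule only on first access.
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value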
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (998 Bytes).
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc
ADDED
Binary file (9.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc
ADDED
Binary file (14.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc
ADDED
Binary file (48.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc
ADDED
Binary file (5.05 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/git/configuration_git.py
ADDED
@@ -0,0 +1,240 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class GitVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GitVisionModel`]. It is used to instantiate a
    GIT vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the GIT
    [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import GitVisionConfig, GitVisionModel

    >>> # Initializing a GitVisionConfig with microsoft/git-base style configuration
    >>> configuration = GitVisionConfig()

    >>> # Initializing a GitVisionModel (with random weights) from the microsoft/git-base style configuration
    >>> model = GitVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GitModel`]. It is used to instantiate a GIT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the GIT
    [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`GitVisionConfig`].
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the GIT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GitModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        num_image_with_embedding (`int`, *optional*):
            The number of temporal embeddings to add, in case the model is used for video captioning/VQA.

    Examples:

    ```python
    >>> from transformers import GitConfig, GitModel

    >>> # Initializing a GIT microsoft/git-base style configuration
    >>> configuration = GitConfig()

    >>> # Initializing a model (with random weights) from the microsoft/git-base style configuration
    >>> model = GitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
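Since GitConfig.__init__ re-wraps whatever dict it receives in GitVisionConfig, the nested vision encoder can be configured independently of the text decoder. A small sketch of that (the override values are arbitrary examples, not recommended settings):

# Sketch: overriding the nested vision config independently of the decoder.
from transformers import GitConfig, GitVisionConfig

vision = GitVisionConfig(image_size=480, patch_size=16)  # e.g. a higher input resolution
config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)

assert config.vision_config.image_size == 480
assert config.model_type == "git" and config.vision_config.model_type == "git_vision_model"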
llmeval-env/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py
ADDED
@@ -0,0 +1,428 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert GIT checkpoints from the original repository.

URL: https://github.com/microsoft/GenerativeImage2Text/tree/main"""


import argparse
from pathlib import Path

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    AutoTokenizer,
    CLIPImageProcessor,
    GitConfig,
    GitForCausalLM,
    GitProcessor,
    GitVisionConfig,
    VideoMAEImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_git_config(model_name):
    if "base" in model_name and "vqa" in model_name:
        image_size = 480
    elif "large" in model_name and "vqa" in model_name:
        image_size = 420
    else:
        image_size = 224

    vision_config = GitVisionConfig(image_size=image_size)

    if "large" in model_name:
        vision_config.patch_size = 14
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_hidden_layers = 24
        vision_config.num_attention_heads = 16

    is_video = "vatex" in model_name or "msrvtt" in model_name
    num_image_with_embedding = 6 if is_video else None
    config = GitConfig(vision_config=vision_config.to_dict(), num_image_with_embedding=num_image_with_embedding)

    return config, image_size, is_video


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, prefix=""):
    rename_keys = []

    # image encoder
    # fmt: off
    rename_keys.append(
        (f"{prefix}image_encoder.class_embedding", "git.image_encoder.vision_model.embeddings.class_embedding")
    )
    rename_keys.append(
        (
            f"{prefix}image_encoder.positional_embedding",
            "git.image_encoder.vision_model.embeddings.position_embedding.weight",
        )
    )
    rename_keys.append(
        (f"{prefix}image_encoder.conv1.weight", "git.image_encoder.vision_model.embeddings.patch_embedding.weight")
    )
    rename_keys.append((f"{prefix}image_encoder.ln_pre.weight", "git.image_encoder.vision_model.pre_layrnorm.weight"))
    rename_keys.append((f"{prefix}image_encoder.ln_pre.bias", "git.image_encoder.vision_model.pre_layrnorm.bias"))
    rename_keys.append(
        (f"{prefix}image_encoder.ln_post.weight", "git.image_encoder.vision_model.post_layernorm.weight")
    )
    rename_keys.append((f"{prefix}image_encoder.ln_post.bias", "git.image_encoder.vision_model.post_layernorm.bias"))
    # fmt: on
    rename_keys.append((f"{prefix}image_encoder.proj", "git.image_encoder.visual_projection.weight"))

    # fmt: off
    for i in range(config.vision_config.num_hidden_layers):
        # image encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.bias"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.bias"))
    # fmt: on

    # text decoder
    # fmt: off
    rename_keys.append((f"{prefix}textual.embedding.words.weight", "git.embeddings.word_embeddings.weight"))
    rename_keys.append((f"{prefix}textual.embedding.positions.weight", "git.embeddings.position_embeddings.weight"))
    rename_keys.append((f"{prefix}textual.visual_projection.0.weight", "git.visual_projection.visual_projection.0.weight"))
    rename_keys.append((f"{prefix}textual.visual_projection.0.bias", "git.visual_projection.visual_projection.0.bias"))
    rename_keys.append((f"{prefix}textual.visual_projection.1.weight", "git.visual_projection.visual_projection.1.weight"))
    rename_keys.append((f"{prefix}textual.visual_projection.1.bias", "git.visual_projection.visual_projection.1.bias"))

    rename_keys.append((f"{prefix}textual.embedding.layer_norm.weight", "git.embeddings.LayerNorm.weight"))
    rename_keys.append((f"{prefix}textual.embedding.layer_norm.bias", "git.embeddings.LayerNorm.bias"))
    rename_keys.append((f"{prefix}textual.output.weight", "output.weight"))
    rename_keys.append((f"{prefix}textual.output.bias", "output.bias"))
    for i in range(config.num_hidden_layers):
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.weight", f"git.encoder.layer.{i}.attention.self.query.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.bias", f"git.encoder.layer.{i}.attention.self.query.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.weight", f"git.encoder.layer.{i}.attention.self.key.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.bias", f"git.encoder.layer.{i}.attention.self.key.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.weight", f"git.encoder.layer.{i}.attention.self.value.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.bias", f"git.encoder.layer.{i}.attention.self.value.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.weight", f"git.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.bias", f"git.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.weight", f"git.encoder.layer.{i}.attention.output.LayerNorm.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.bias", f"git.encoder.layer.{i}.attention.output.LayerNorm.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.weight", f"git.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.bias", f"git.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.weight", f"git.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.bias", f"git.encoder.layer.{i}.output.dense.bias"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.weight", f"git.encoder.layer.{i}.output.LayerNorm.weight"))
        rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.bias", f"git.encoder.layer.{i}.output.LayerNorm.bias"))
    # fmt: on

    if config.num_image_with_embedding is not None:
        rename_keys.append(("img_temperal_embedding.0", "git.img_temperal_embedding.0"))
        rename_keys.append(("img_temperal_embedding.1", "git.img_temperal_embedding.1"))
        rename_keys.append(("img_temperal_embedding.2", "git.img_temperal_embedding.2"))
        rename_keys.append(("img_temperal_embedding.3", "git.img_temperal_embedding.3"))
        rename_keys.append(("img_temperal_embedding.4", "git.img_temperal_embedding.4"))
        rename_keys.append(("img_temperal_embedding.5", "git.img_temperal_embedding.5"))

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val.T if "image_encoder.visual_projection" in new else val


# we split up the matrix of each CLIP encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, prefix=""):
    dim = config.vision_config.hidden_size
    for i in range(config.vision_config.num_hidden_layers):
        # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:dim, :]
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:dim]
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[dim : dim * 2, :]
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[dim : dim * 2]
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-dim:, :]
        state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-dim:]


# We will verify our results on an image
def prepare_img(model_name):
    if "textvqa" in model_name:
        filepath = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
        image = Image.open(filepath).convert("RGB")
    else:
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)

    return image


def prepare_video():
    from decord import VideoReader, cpu

    # set seed for reproducibility
    np.random.seed(0)

    def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        """
        Sample a given number of frame indices from the video.

        Args:
            clip_len (`int`): Total number of frames to sample.
            frame_sample_rate (`int`): Sample every n-th frame.
            seg_len (`int`): Maximum allowed index of sample's last frame.

        Returns:
            indices (`List[int]`): List of sampled frame indices
        """
        converted_len = int(clip_len * frame_sample_rate)
        end_idx = np.random.randint(converted_len, seg_len)
        start_idx = end_idx - converted_len
        indices = np.linspace(start_idx, end_idx, num=clip_len)
        indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        return indices

    # video clip consists of 300 frames (10 seconds at 30 FPS)
    file_path = hf_hub_download(repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset")
    videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0))

    # sample 6 frames
    videoreader.seek(0)
    indices = sample_frame_indices(clip_len=6, frame_sample_rate=4, seg_len=len(videoreader))
    video = videoreader.get_batch(indices).asnumpy()

    return video


@torch.no_grad()
def convert_git_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our GIT structure.
    """

    model_name_to_url = {
        "git-base": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE/snapshot/model.pt",
        "git-base-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_COCO/snapshot/model.pt",
        "git-base-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTCAPS/snapshot/model.pt",
        "git-base-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VQAv2/snapshot/model.pt",
        "git-base-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTVQA/snapshot/model.pt",  # todo
        "git-base-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VATEX/snapshot/model.pt",
        "git-base-msrvtt-qa": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_MSRVTT_QA/snapshot/model.pt",
        "git-large": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE/snapshot/model.pt",
        "git-large-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_COCO/snapshot/model.pt",
        "git-large-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTCAPS/snapshot/model.pt",
        "git-large-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VQAv2/snapshot/model.pt",
        "git-large-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTVQA/snapshot/model.pt",
        "git-large-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VATEX/snapshot/model.pt",
        "git-large-msrvtt-qa": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_MSRVTT_QA/snapshot/model.pt",
        "git-large-r": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R/snapshot/model.pt",
        "git-large-r-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_COCO/snapshot/model.pt",
        "git-large-r-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_TEXTCAPS/snapshot/model.pt",
    }

    model_name_to_path = {
        "git-large": "/Users/nielsrogge/Documents/GIT/git_large_model.pt",
        "git-large-coco": "/Users/nielsrogge/Documents/GIT/git_large_coco_model.pt",
        "git-large-textcaps": "/Users/nielsrogge/Documents/GIT/git_large_textcaps_model.pt",
        "git-large-vqav2": "/Users/nielsrogge/Documents/GIT/git_large_vqav2_model.pt",
        "git-large-textvqa": "/Users/nielsrogge/Documents/GIT/git_large_textvqa_model.pt",
    }

    # define GIT configuration based on model name
    config, image_size, is_video = get_git_config(model_name)
    if "large" in model_name and not is_video and "large-r" not in model_name:
        # large checkpoints take way too long to download
        checkpoint_path = model_name_to_path[model_name]
        state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    else:
        checkpoint_url = model_name_to_url[model_name]
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
            "model"
        ]
    # rename keys
    prefix = "module." if model_name == "git-base" else ""
    rename_keys = create_rename_keys(config, prefix=prefix)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, prefix=prefix)

    # load HuggingFace model
    model = GitForCausalLM(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    model.eval()

    print("Missing keys:", missing_keys)
    print("Unexpected keys:", unexpected_keys)

    assert missing_keys == ["git.embeddings.position_ids", "git.image_encoder.vision_model.embeddings.position_ids"]
    assert unexpected_keys == ["git.image_encoder.visual_projection.weight"]

    # verify results
    image_processor = (
        VideoMAEImageProcessor(
            size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
        )
        if is_video
        else CLIPImageProcessor(
            size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
        )
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "google-bert/bert-base-uncased", model_input_names=["input_ids", "attention_mask"]
    )
    processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)

    if is_video:
        video = prepare_video()
        pixel_values = processor(images=list(video), return_tensors="pt").pixel_values
    else:
        image = prepare_img(model_name)
        image_transforms = Compose(
            [
                Resize(image_size, interpolation=Image.BICUBIC),
                CenterCrop(image_size),
                ToTensor(),
                Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
            ]
        )
        original_pixel_values = image_transforms(image).unsqueeze(0)
        pixel_values = processor(images=image, return_tensors="pt").pixel_values

        assert torch.allclose(pixel_values, original_pixel_values)

    input_ids = torch.tensor([[101]])
    outputs = model(input_ids, pixel_values=pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, -1, :3])

    if model_name == "git-base":
        expected_slice_logits = torch.tensor([-1.2832, -1.2835, -1.2840])
    elif model_name == "git-base-coco":
        expected_slice_logits = torch.tensor([-0.9925, -0.9930, -0.9935])
    elif model_name == "git-base-textcaps":
        expected_slice_logits = torch.tensor([-1.2980, -1.2983, -1.2985])
    elif model_name == "git-base-vqav2":
        expected_slice_logits = torch.tensor([-0.8570, -0.8568, -0.8561])
    elif model_name == "git-base-textvqa":
        expected_slice_logits = torch.tensor([-1.4085, -1.4083, -1.4082])
    elif model_name == "git-base-vatex":
        expected_slice_logits = torch.tensor([-1.3451, -1.3447, -1.3447])
    elif model_name == "git-base-msrvtt-qa":
        expected_slice_logits = torch.tensor([-0.8554, -0.8550, -0.8540])
    elif model_name == "git-large":
        expected_slice_logits = torch.tensor([-1.1708, -1.1707, -1.1705])
    elif model_name == "git-large-coco":
        expected_slice_logits = torch.tensor([-1.0425, -1.0423, -1.0422])
    elif model_name == "git-large-textcaps":
        expected_slice_logits = torch.tensor([-1.2705, -1.2708, -1.2706])
    elif model_name == "git-large-vqav2":
        expected_slice_logits = torch.tensor([-0.7042, -0.7043, -0.7043])
    elif model_name == "git-large-textvqa":
        expected_slice_logits = torch.tensor([-0.8590, -0.8592, -0.8590])
    elif model_name == "git-large-vatex":
        expected_slice_logits = torch.tensor([-1.0113, -1.0114, -1.0113])
    elif model_name == "git-large-msrvtt-qa":
        expected_slice_logits = torch.tensor([0.0130, 0.0134, 0.0131])
    elif model_name == "git-large-r":
        expected_slice_logits = torch.tensor([-1.1283, -1.1285, -1.1286])
    elif model_name == "git-large-r-coco":
        expected_slice_logits = torch.tensor([-0.9641, -0.9641, -0.9641])
    elif model_name == "git-large-r-textcaps":
        expected_slice_logits = torch.tensor([-1.1121, -1.1120, -1.1124])

    assert torch.allclose(logits[0, -1, :3], expected_slice_logits, atol=1e-4)
    print("Looks ok!")

    prompt = ""
    if "textvqa" in model_name:
        prompt = "what does the front of the bus say at the top?"
    elif "msrvtt-qa" in model_name:
        prompt = "what does the woman eat?"
    elif "vqa" in model_name:
        prompt = "what are the cats doing?"
    input_ids = tokenizer(prompt, add_special_tokens=False).input_ids
    input_ids = [processor.tokenizer.cls_token_id] + input_ids
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    print("Generating caption...")
    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
    print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"microsoft/{model_name}")
        processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="git-base",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_git_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
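The slicing in read_in_q_k_v above splits CLIP's fused [3 * dim, dim] in_proj matrix into separate q/k/v projections. A tiny self-contained check of that partitioning (dim here is a small stand-in for the real hidden size):

# Sketch of the in_proj slicing performed by read_in_q_k_v above.
import torch

dim = 4  # stands in for config.vision_config.hidden_size
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
in_proj_bias = torch.arange(3 * dim, dtype=torch.float32)

q_w, k_w, v_w = in_proj_weight[:dim, :], in_proj_weight[dim : dim * 2, :], in_proj_weight[-dim:, :]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : dim * 2], in_proj_bias[-dim:]

# The three slices partition the fused matrix exactly, with no overlap or gap.
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)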
llmeval-env/lib/python3.10/site-packages/transformers/models/git/modeling_git.py
ADDED
@@ -0,0 +1,1543 @@
# coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GIT model."""


import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...file_utils import ModelOutput
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPast,
    BaseModelOutputWithPooling,
    CausalLMOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_git import GitConfig, GitVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "microsoft/git-base"
_CONFIG_FOR_DOC = "GitConfig"


from ..deprecated._archive_maps import GIT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git
class GitVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden
    states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class GitEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            embeddings = self.word_embeddings(input_ids)
        else:
            embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
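

# Illustrative sketch (toy values, not part of the upstream file): during incremental
# decoding, `past_key_values_length` shifts the absolute position ids so each newly
# generated token receives the next position in the sequence.
def _demo_position_id_offset():
    import torch

    position_ids = torch.arange(8).expand((1, -1))  # mirrors the registered buffer above
    seq_length = 1  # one new token per generation step
    past_key_values_length = 5  # five tokens already in the cache
    # same slicing as GitEmbeddings.forward -> tensor([[5]])
    return position_ids[:, past_key_values_length : seq_length + past_key_values_length]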


class GitSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
        if config.num_image_with_embedding is not None:
            self.image_patch_tokens *= config.num_image_with_embedding

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        pixel_values_present: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        cutoff = self.image_patch_tokens if pixel_values_present else 0
        if past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([key_layer[:, :, :cutoff, :], past_key_value[0], key_layer[:, :, -1:, :]], dim=2)
            value_layer = torch.cat(
                [value_layer[:, :, :cutoff, :], past_key_value[1], value_layer[:, :, -1:, :]], dim=2
            )
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
        # Further calls to cross_attention layer can then reuse all cross-attention
        # key/value_states (first "if" case)
        # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
        # all previous decoder key/value_states. Further calls to uni-directional self-attention
        # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
        # if encoder bi-directional self-attention `past_key_value` is always `None`
        # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component.
        past_key_value = (
            key_layer[:, :, cutoff:, :],
            value_layer[:, :, cutoff:, :],
        )

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in GitModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs
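

# Sketch (toy shapes, not part of the upstream file): the `cutoff` slicing above keeps
# only the text keys/values in the cache, discarding the image block. With 4 image
# patch tokens and 3 text tokens:
def _demo_text_only_cache():
    import torch

    batch, heads, head_dim = 1, 2, 8
    cutoff, text_len = 4, 3
    key_layer = torch.randn(batch, heads, cutoff + text_len, head_dim)
    cached_keys = key_layer[:, :, cutoff:, :]  # the slice stored in `past_key_value`
    assert cached_keys.shape == (batch, heads, text_len, head_dim)
    return cached_keys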


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class GitSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class GitAttention(nn.Module):
    # Copied from transformers.models.bert.modeling_bert.BertAttention.__init__ with Bert->Git
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = GitSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = GitSelfOutput(config)
        self.pruned_heads = set()

    # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        pixel_values_present: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            past_key_value,
            output_attentions,
            pixel_values_present,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class GitIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput
class GitOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class GitLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = GitAttention(config)
        self.intermediate = GitIntermediate(config)
        self.output = GitOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        pixel_values_present: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            pixel_values_present=pixel_values_present,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
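

# Sketch (toy tensors, not part of the upstream file): `apply_chunking_to_forward` is
# the real transformers utility used by GitLayer above. It applies the feed-forward to
# slices along the sequence dimension to lower peak memory, with identical results.
def _demo_feed_forward_chunking():
    import torch
    from transformers.pytorch_utils import apply_chunking_to_forward

    ff = torch.nn.Linear(16, 16)
    hidden = torch.randn(2, 6, 16)  # (batch, seq_len, hidden)
    chunked = apply_chunking_to_forward(ff, 2, 1, hidden)  # chunk_size=2 along dim 1
    assert torch.allclose(chunked, ff(hidden), atol=1e-6)
    return chunked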


class GitEncoder(nn.Module):
    # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Git
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([GitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        pixel_values_present: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    past_key_value,
                    output_attentions,
                    pixel_values_present,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
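

# Shape sketch (toy values, not part of the upstream file): with `use_cache=True`,
# `next_decoder_cache` above accumulates one (key, value) pair per layer; image tokens
# are excluded by GitSelfAttention, so only the text positions grow step by step.
def _demo_cache_layout(num_layers=2, batch=1, heads=2, text_len=3, head_dim=8):
    import torch

    cache = tuple(
        (torch.zeros(batch, heads, text_len, head_dim), torch.zeros(batch, heads, text_len, head_dim))
        for _ in range(num_layers)
    )
    assert len(cache) == num_layers and cache[0][0].shape[2] == text_len
    return cache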


class GitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GitConfig
    base_model_prefix = "git"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, GitVisionEmbeddings):
            nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range)
            nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range)
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


GIT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`GitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GIT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)

        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`CLIPImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git
class GitVisionEmbeddings(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
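

# Arithmetic sketch (not part of the upstream file; values assume the git-base vision
# defaults of 224px images and 16px patches): the embedding sequence is one CLS token
# plus a 14x14 grid of patch tokens.
def _demo_patch_counts(image_size=224, patch_size=16):
    num_patches = (image_size // patch_size) ** 2  # 196
    num_positions = num_patches + 1  # 197, matching GitVisionEmbeddings above
    return num_patches, num_positions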


# Copied from transformers.models.clip.modeling_clip.CLIPMLP
class GitVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.clip.modeling_clip.CLIPAttention
class GitVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
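

# Shape sketch (toy sizes, not part of the upstream file): the `_shape`/`view` calls
# above fold the head dimension into the batch dimension so one `torch.bmm` computes
# attention for every head at once.
def _demo_folded_head_shapes():
    import torch

    bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8
    q = torch.randn(bsz * num_heads, tgt_len, head_dim)
    k = torch.randn(bsz * num_heads, tgt_len, head_dim)
    attn_weights = torch.bmm(q, k.transpose(1, 2))
    assert attn_weights.shape == (bsz * num_heads, tgt_len, tgt_len)
    return attn_weights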


# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GitVision
class GitVisionEncoderLayer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = GitVisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = GitVisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->GitVision, CLIPConfig
class GitVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`GitVisionEncoderLayer`].

    Args:
        config: GitVisionConfig
    """

    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


GIT_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class GitVisionTransformer(nn.Module):
    # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPEncoder->GitVisionEncoder, CLIP->Git
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = GitVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = GitVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            return (last_hidden_state,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """The vision model from CLIP, used in GIT, without any head or projection on top.""",
    GIT_START_DOCSTRING,
)
class GitVisionModel(GitPreTrainedModel):
    config_class = GitVisionConfig
    main_input_name = "pixel_values"

    # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git
    def __init__(self, config: GitVisionConfig):
        super().__init__(config)
        self.vision_model = GitVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GitVisionModel

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = GitVisionModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class GitProjection(nn.Module):
    def __init__(self, config: GitConfig):
        super().__init__()
        self.config = config
        self.visual_projection = nn.Sequential(
            nn.Linear(config.vision_config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        return self.visual_projection(embeddings)


@add_start_docstrings(
    "The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states"
    " without any specific head on top.",
    GIT_START_DOCSTRING,
)
class GitModel(GitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = GitEmbeddings(config)
        self.image_encoder = GitVisionModel(config.vision_config)
        self.encoder = GitEncoder(config)

        self.visual_projection = GitProjection(config)

        if config.num_image_with_embedding is not None:
            self.img_temperal_embedding = nn.ParameterList(
                nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
                for _ in range(config.num_image_with_embedding)
            )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
        # Default mask is for forward direction. Flip for backward direction.
        mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
        mask = mask.masked_fill(mask == 1, float("-inf"))
        return mask

    def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
        num_tgt = tgt.shape[1]
        num_memory = memory.shape[1]
        device = tgt.device
        dtype = tgt.dtype
        top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
        top_right = torch.full(
            (num_memory, num_tgt + past_key_values_length),
            float("-inf"),
            device=tgt.device,
            dtype=dtype,
        )
        bottom_left = torch.zeros(
            (num_tgt, num_memory),
            dtype=dtype,
            device=tgt_mask.device,
        )

        if past_key_values_length > 0:
            tgt_mask = torch.zeros(
                (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
                dtype=dtype,
                device=tgt_mask.device,
            )

        left = torch.cat((top_left, bottom_left), dim=0)
        right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)

        full_attention_mask = torch.cat((left, right), dim=1)[None, :]

        if memory_key_padding_mask is None:
            memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
        # False means a valid (non-padding) position
        if memory_key_padding_mask.dtype != torch.bool:
            raise ValueError("Memory key padding mask must be a boolean tensor.")
        zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
        zero_negative_infinity[memory_key_padding_mask] = float("-inf")
        full_attention_mask = full_attention_mask.expand(
            (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
        )
        full_attention_mask = full_attention_mask.clone()
        origin_left = full_attention_mask[:, :, :num_memory]
        update = zero_negative_infinity[:, None, :]
        full_attention_mask[:, :, :num_memory] = origin_left + update

        # add axis for multi-head
        full_attention_mask = full_attention_mask[:, None, :, :]

        return full_attention_mask
1125 |
+
|
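    # Block layout of the additive mask built by `create_attention_mask`: with
    # `num_memory` image tokens and `num_tgt` text tokens (no cache),
    #
    #                  image cols    text cols
    #    image rows  [     0       |   -inf    ]
    #    text  rows  [     0       | tgt_mask  ]
    #
    # i.e. image tokens attend only to each other, while text tokens attend to all
    # image tokens and causally (via `tgt_mask`) to the text tokens.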
    @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        r"""
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> import requests
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = AutoModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> text = "this is an image of two cats"

        >>> inputs = processor(text, images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length = input_shape[1]

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        projected_visual_features = None
        if pixel_values is not None:
            if pixel_values.ndim == 4:
                # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
                visual_features = self.image_encoder(pixel_values).last_hidden_state

            elif pixel_values.ndim == 5:
                # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
                visual_features = []
                for frame_idx in range(pixel_values.shape[1]):
                    visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state
                    visual_features_frame += self.img_temperal_embedding[frame_idx]
                    visual_features.append(visual_features_frame)

                # finally, concatenate all features along sequence dimension
                visual_features = torch.cat(visual_features, dim=1)

            else:
                raise ValueError("pixel_values must be of rank 4 or 5")

            projected_visual_features = self.visual_projection(visual_features)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        if projected_visual_features is None:
            projected_visual_features = torch.zeros(
                (embedding_output.shape[0], 0, embedding_output.shape[2]),
                dtype=embedding_output.dtype,
                device=embedding_output.device,
            )

        # Repeat visual features to match embedding batch size.
        projected_visual_features = projected_visual_features.repeat(
            embedding_output.size(0) // projected_visual_features.size(0), 1, 1
        )

        # concatenate patch token and text token embeddings
        hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)

        # By default, an additive causal mask is created
        # for masking the future (one direction).
        tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)

        # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
        combined_attention_mask = self.create_attention_mask(
            tgt=embedding_output,
            memory=projected_visual_features,
            tgt_mask=tgt_mask,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is not None:
            # if the user provides an attention mask, we add it to the default one
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _prepare_4d_attention_mask(
                attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]
            ).to(embedding_output.device)
            if past_key_values_length > 0:
                expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
            else:
                combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=combined_attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values_present=pixel_values is not None,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutputWithPast(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING
)
class GitForCausalLM(GitPreTrainedModel):
    _tied_weights_keys = ["output.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.git = GitModel(config)
        self.output = nn.Linear(config.hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Examples:

        Image captioning example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> import requests
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
        >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> print(generated_caption)
        two cats sleeping on a pink blanket next to remotes.
        ```

        Visual question answering (VQA) example:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> from huggingface_hub import hf_hub_download
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")

        >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
        >>> image = Image.open(file_path).convert("RGB")

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> question = "what does the front of the bus say at the top?"

        >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
        >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
        >>> input_ids = torch.tensor(input_ids).unsqueeze(0)

        >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
        >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
        ['what does the front of the bus say at the top? special']
        ```

        Video captioning example:

        ```python
        >>> import av
        >>> import numpy as np
        >>> from PIL import Image
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, AutoModelForCausalLM

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")

        >>> # set seed for reproducibility
        >>> np.random.seed(45)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # load video
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample frames
        >>> num_frames = model.config.num_image_with_embedding
        >>> indices = sample_frame_indices(
        ...     clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
        ... )
        >>> frames = read_video_pyav(container, indices)

        >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)

        >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
        Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.git(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            pixel_values=pixel_values,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.output(sequence_output)

        loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
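            # the logits also cover the image patch tokens that GitModel prepends to the text
            # embeddings, so the first `num_image_tokens` positions are dropped before shifting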
            num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
            shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        input_shape = input_ids.shape
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": kwargs.get("pixel_values", None),
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
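The decoder's causality above comes entirely from the additive future mask. A minimal standalone sketch mirroring what `_generate_future_mask` returns for a three-token sequence:

```python
import torch

def generate_future_mask(size: int) -> torch.Tensor:
    # ones strictly above the diagonal mark future positions...
    mask = torch.triu(torch.ones(size, size), diagonal=1)
    # ...which are filled with -inf so softmax assigns them zero weight
    return mask.masked_fill(mask == 1, float("-inf"))

print(generate_future_mask(3))
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])
```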
llmeval-env/lib/python3.10/site-packages/transformers/models/git/processing_git.py
ADDED
@@ -0,0 +1,113 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for GIT
"""

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    r"""
    Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.

    [`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
    [`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.

    Args:
        image_processor ([`AutoImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`AutoTokenizer`]):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """

        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
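A short usage sketch of the processor, reusing the `microsoft/git-base` checkpoint and COCO image URL already referenced in the modeling docstrings above; depending on which inputs are given, the returned `BatchEncoding` carries text fields, `pixel_values`, or both:

```python
import requests
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/git-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# text-only -> input_ids/attention_mask; image-only -> pixel_values; both -> all three
inputs = processor(text="two cats on a couch", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```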
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__init__.py
ADDED
@@ -0,0 +1,15 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .auto import AutoHfQuantizer, AutoQuantizationConfig
from .base import HfQuantizer
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (313 Bytes)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc
ADDED
Binary file (4.58 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc
ADDED
Binary file (9.79 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc
ADDED
Binary file (3.47 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc
ADDED
Binary file (4.25 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc
ADDED
Binary file (10.5 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc
ADDED
Binary file (10.2 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc
ADDED
Binary file (3.68 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc
ADDED
Binary file (6.97 kB)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc
ADDED
Binary file (607 Bytes)
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/auto.py
ADDED
@@ -0,0 +1,161 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Dict, Optional, Union

from ..models.auto.configuration_auto import AutoConfig
from ..utils.quantization_config import (
    AqlmConfig,
    AwqConfig,
    BitsAndBytesConfig,
    GPTQConfig,
    QuantizationConfigMixin,
    QuantizationMethod,
    QuantoConfig,
)
from .quantizer_aqlm import AqlmHfQuantizer
from .quantizer_awq import AwqQuantizer
from .quantizer_bnb_4bit import Bnb4BitHfQuantizer
from .quantizer_bnb_8bit import Bnb8BitHfQuantizer
from .quantizer_gptq import GptqHfQuantizer
from .quantizer_quanto import QuantoHfQuantizer


AUTO_QUANTIZER_MAPPING = {
    "awq": AwqQuantizer,
    "bitsandbytes_4bit": Bnb4BitHfQuantizer,
    "bitsandbytes_8bit": Bnb8BitHfQuantizer,
    "gptq": GptqHfQuantizer,
    "aqlm": AqlmHfQuantizer,
    "quanto": QuantoHfQuantizer,
}

AUTO_QUANTIZATION_CONFIG_MAPPING = {
    "awq": AwqConfig,
    "bitsandbytes_4bit": BitsAndBytesConfig,
    "bitsandbytes_8bit": BitsAndBytesConfig,
    "gptq": GPTQConfig,
    "aqlm": AqlmConfig,
    "quanto": QuantoConfig,
}


class AutoQuantizationConfig:
    """
    The Auto-HF quantization config class that takes care of automatically dispatching to the correct
    quantization config given a quantization config stored in a dictionary.
    """

    @classmethod
    def from_dict(cls, quantization_config_dict: Dict):
        quant_method = quantization_config_dict.get("quant_method", None)
        # We need special care for bnb models to make sure everything stays backward compatible.
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
            )

        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
        return target_cls.from_dict(quantization_config_dict)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        if getattr(model_config, "quantization_config", None) is None:
            raise ValueError(
                f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
            )
        quantization_config_dict = model_config.quantization_config
        quantization_config = cls.from_dict(quantization_config_dict)
        # Update with potential kwargs that are passed through from_pretrained.
        quantization_config.update(kwargs)
        return quantization_config


class AutoHfQuantizer:
    """
    The Auto-HF quantizer class that takes care of automatically instantiating the correct
    `HfQuantizer` given the `QuantizationConfig`.
    """

    @classmethod
    def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
        # Convert it to a QuantizationConfig if the q_config is a dict
        if isinstance(quantization_config, dict):
            quantization_config = AutoQuantizationConfig.from_dict(quantization_config)

        quant_method = quantization_config.quant_method

        # Again, we need special care for bnb as we have a single quantization config
        # class for both 4-bit and 8-bit quantization
        if quant_method == QuantizationMethod.BITS_AND_BYTES:
            if quantization_config.load_in_8bit:
                quant_method += "_8bit"
            else:
                quant_method += "_4bit"

        if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
        return target_cls(quantization_config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls.from_config(quantization_config)

    @classmethod
    def merge_quantization_configs(
        cls,
        quantization_config: Union[dict, QuantizationConfigMixin],
        quantization_config_from_args: Optional[QuantizationConfigMixin],
    ):
        """
        Handles situations where both a quantization config from the args and a quantization config from the model
        config are present.
        """
        if quantization_config_from_args is not None:
            warning_msg = (
                "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
            )
        else:
            warning_msg = ""

        if isinstance(quantization_config, dict):
            quantization_config = AutoQuantizationConfig.from_dict(quantization_config)

        if isinstance(quantization_config, (GPTQConfig, AwqConfig)) and quantization_config_from_args is not None:
            # special case for GPTQ / AWQ config collision
            loading_attr_dict = quantization_config_from_args.get_loading_attributes()
            for attr, val in loading_attr_dict.items():
                setattr(quantization_config, attr, val)
            warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."

        if warning_msg != "":
            warnings.warn(warning_msg)

        return quantization_config
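A minimal sketch of the dispatch path above (values chosen for illustration): a legacy bnb-style dict carries no `quant_method`, so `from_dict` derives the `bitsandbytes_8bit` key and returns a `BitsAndBytesConfig`, which `from_config` then maps to the matching quantizer class:

```python
from transformers.quantizers import AutoHfQuantizer, AutoQuantizationConfig

# a legacy bnb-style dict, as stored by older checkpoints (no explicit quant_method)
config = AutoQuantizationConfig.from_dict({"load_in_8bit": True})
print(type(config).__name__)     # BitsAndBytesConfig

quantizer = AutoHfQuantizer.from_config(config)
print(type(quantizer).__name__)  # Bnb8BitHfQuantizer
```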
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/base.py
ADDED
@@ -0,0 +1,213 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from ..utils import is_torch_available
from ..utils.quantization_config import QuantizationConfigMixin


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

if is_torch_available():
    import torch


class HfQuantizer(ABC):
    """
    Abstract class of the HuggingFace quantizer. For now, supports quantizing HF transformers models for inference
    and/or quantization. This class is used only for transformers.PreTrainedModel.from_pretrained and cannot easily
    be used outside the scope of that method yet.

    Attributes:
        quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
            The quantization config that defines the quantization parameters of your model that you want to quantize.
        modules_to_not_convert (`List[str]`, *optional*):
            The list of module names to not convert when quantizing the model.
        required_packages (`List[str]`, *optional*):
            The list of required pip packages to install prior to using the quantizer.
        requires_calibration (`bool`):
            Whether the quantization method requires calibrating the model before using it.
        requires_parameters_quantization (`bool`):
            Whether the quantization method requires creating a new Parameter. For example, for bitsandbytes, it is
            required to create a new xxxParameter in order to properly quantize the model.
    """

    requires_calibration = False
    required_packages = None
    requires_parameters_quantization = False

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        self.quantization_config = quantization_config

        # -- Handle extra kwargs below --
        self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
        self.pre_quantized = kwargs.pop("pre_quantized", True)

        if not self.pre_quantized and self.requires_calibration:
            raise ValueError(
                f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
                f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
                f"pass `pre_quantized=True` while knowing what you are doing."
            )

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        """
        Some quantization methods require to explicitly set the dtype of the model to a
        target dtype. You need to override this method in case you want to make sure that behavior is
        preserved.

        Args:
            torch_dtype (`torch.dtype`):
                The input dtype that is passed in `from_pretrained`
        """
        return torch_dtype

    def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """
        Override this method if you want to override the existing device map with a new
        one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
        passed, the device_map is set to `"auto"`.

        Args:
            device_map (`Union[dict, str]`, *optional*):
                The device_map that is passed through the `from_pretrained` method.
        """
        return device_map

    def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        """
        Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
        to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
        to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.

        Args:
            torch_dtype (`torch.dtype`, *optional*):
                The torch_dtype that is used to compute the device_map.
        """
        return torch_dtype

    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
        """
        Override this method if you want to adjust the `missing_keys`.

        Args:
            missing_keys (`List[str]`, *optional*):
                The list of missing keys in the checkpoint compared to the state dict of the model
        """
        return missing_keys

    def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
        """
        Returns dtypes for modules that are not quantized - used for the computation of the device_map in case
        one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
        in `_process_model_before_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            torch_dtype (`torch.dtype`):
                The dtype passed in `from_pretrained` method.
        """

        return {
            name: torch_dtype
            for name, _ in model.named_parameters()
            if any(m in name for m in self.modules_to_not_convert)
        }

    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
        return max_memory

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        Checks if a loaded state_dict component is part of a quantized param, plus some validation; only defined if
        requires_parameters_quantization == True, for quantization methods that require creating new parameters
        for quantization.
        """
        return False

    def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
        """
        Takes the needed components from the state_dict and creates a quantized param; only applicable if
        requires_parameters_quantization == True.
        """
        if not self.requires_parameters_quantization:
            raise AttributeError(
                f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
            )

    def validate_environment(self, *args, **kwargs):
        """
        This method is used to check for potential conflicts with arguments that are
        passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers.
        If no explicit checks are needed, simply return nothing.
        """
        return

    def preprocess_model(self, model: "PreTrainedModel", **kwargs):
        """
        Sets model attributes and/or converts the model before weights loading. At this point
        the model should be initialized on the meta device so you can freely manipulate the skeleton
        of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_before_weight_loading`.
        """
        model.is_quantized = True
        model.quantization_method = self.quantization_config.quant_method
        return self._process_model_before_weight_loading(model, **kwargs)

    def postprocess_model(self, model: "PreTrainedModel", **kwargs):
        """
        Post-process the model post weights loading.
        Make sure to override the abstract method `_process_model_after_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_after_weight_loading`.
        """
        return self._process_model_after_weight_loading(model, **kwargs)

    @abstractmethod
    def _process_model_before_weight_loading(self, model, **kwargs):
        ...

    @abstractmethod
    def _process_model_after_weight_loading(self, model, **kwargs):
        ...

    @property
    @abstractmethod
    def is_serializable(self):
        ...

    @property
    @abstractmethod
    def is_trainable(self):
        ...
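A minimal sketch, assuming a hypothetical no-op quantization method, of the surface a concrete subclass must implement: the two `_process_model_*` hooks plus the two abstract properties.

```python
from transformers.quantizers.base import HfQuantizer


class NoOpQuantizer(HfQuantizer):
    """Hypothetical quantizer that loads weights unchanged; for illustration only."""

    requires_calibration = False

    def _process_model_before_weight_loading(self, model, **kwargs):
        # a real quantizer would swap nn.Linear modules for quantized ones here,
        # while the model still lives on the meta device
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        # a real quantizer could finalize buffers or free fp weights here
        return model

    @property
    def is_serializable(self):
        return True

    @property
    def is_trainable(self):
        return False
```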
llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py
ADDED
@@ -0,0 +1,98 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING, Optional

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..integrations import replace_with_aqlm_linear
from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class AqlmHfQuantizer(HfQuantizer):
    """
    Quantizer of the AQLM method. Enables the loading of prequantized models.
    """

    requires_calibration = True
    required_packages = ["aqlm"]
    optimum_quantizer = None

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        super().__init__(quantization_config, **kwargs)
        self.quantization_config = quantization_config

    def validate_environment(self, *args, **kwargs):
        if not is_accelerate_available():
            raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`")

        if not is_aqlm_available():
            raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`")

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        if torch_dtype is None:
            if torch.cuda.is_available():
                torch_dtype = torch.float16
                logger.info(
                    "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually."
                )
            else:
                torch_dtype = torch.float32
                logger.info(
                    "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually."
                )
        return torch_dtype

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        **kwargs,
    ):
        replace_with_aqlm_linear(
            model,
            quantization_config=self.quantization_config,
            linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize,
        )
        model.config.quantization_config = self.quantization_config

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        return model

    @property
    def is_trainable(self, model: Optional["PreTrainedModel"] = None):
        aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2")
        if aqlm_supports_training:
            return True
        else:
            logger.warning(
                f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`"
            )
            return False

    @property
    def is_serializable(self):
        return True
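This quantizer is not meant to be constructed by hand: `from_pretrained` reads the checkpoint's `quantization_config` and dispatches to it via `AutoHfQuantizer`. A sketch with a placeholder repo id standing in for any AQLM pre-quantized checkpoint:

```python
from transformers import AutoModelForCausalLM

# placeholder repo id; substitute a real AQLM pre-quantized checkpoint
model = AutoModelForCausalLM.from_pretrained("some-org/llama-2-7b-aqlm")

# AqlmHfQuantizer.validate_environment has already checked for `aqlm` and
# `accelerate`, and update_torch_dtype picked float16 (GPU) or float32 (CPU).
```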