applied-ai-018 committed
Commit bc96d3f · verified · 1 Parent(s): f0bac08

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  2. lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-loglikelihood +1 -0
  3. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood +1 -0
  4. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/cb-v1-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/mutual-v0-loglikelihood +1 -0
  18. lm-evaluation-harness/tests/testdata/mutual-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json +1 -0
  20. lm-evaluation-harness/tests/testdata/pile_books3-v1-res.json +1 -0
  21. lm-evaluation-harness/tests/testdata/pile_enron-v0-res.json +1 -0
  22. lm-evaluation-harness/tests/testdata/pile_enron-v1-res.json +1 -0
  23. lm-evaluation-harness/tests/testdata/pile_github-v1-loglikelihood_rolling +1 -0
  24. lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-loglikelihood_rolling +1 -0
  25. lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-res.json +1 -0
  26. lm-evaluation-harness/tests/testdata/pile_philpapers-v0-res.json +1 -0
  27. lm-evaluation-harness/tests/testdata/random_insertion-v0-res.json +1 -0
  28. lm-evaluation-harness/tests/testdata/triviaqa-v1-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-res.json +1 -0
  30. lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-loglikelihood +1 -0
  31. lm-evaluation-harness/tests/testdata/winogrande-v0-loglikelihood +1 -0
  32. lm-evaluation-harness/tests/testdata/wsc273-v0-loglikelihood +1 -0
  33. venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png +3 -0
  34. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py +67 -0
  35. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py +200 -0
  40. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py +1170 -0
  41. venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py +800 -0
  42. venv/lib/python3.10/site-packages/transformers/models/mbart/__init__.py +148 -0
  43. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8372e8f5a716ef6fd653031d370765ed4ac4f06eaf8cc2391d39b84c5df2da3c
+ size 33555533
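Note that the three added lines above are a Git LFS pointer, not the tensor data itself; the ~33 MB binary only appears locally after `git lfs pull`. For orientation, a minimal sketch of inspecting such a shard once it has been fetched — this assumes the file is a plain torch-serialized object, which is not guaranteed by the diff:

import torch

# Hypothetical inspection of the materialized LFS object (after `git lfs pull`
# has replaced the pointer file above with the real binary).
shard = torch.load(
    "ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt",
    map_location="cpu",
)
print(type(shard))  # often a torch.Tensor or a small dict of tensors
if hasattr(shard, "shape"):
    print(shard.shape, shard.dtype)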
lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 49edb1e735660631ea6cc309721e6c0b80b7106a613a6959514852ca48f1130e
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7fab9f02e71a224ae7931aa77f8a9a61d887a7480756adc965d4746e97fb04a5
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 007c47e5fbf88119c5180feef75e1345d448e56adcd4c7ab2d52fb8d67350d34
lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_existential_there_object_raising": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_object_raising": 0}}
lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d77594382e6d9af31a8b8ef00ba1ef6c29d6be6d0ddb7a9c27ef25ace654e05a
lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_passive_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_sentential_subject_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_subject_island": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4d4aaa0274ccd485ff8430ed61b8f83806febe18c16616c7d050f637a0463eba
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_vs_that_with_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_with_gap": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ eed67491bdf493a1dad8f1d9766bc7bd0e79946365b833c0f7eb81ac998e3dca
lm-evaluation-harness/tests/testdata/cb-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 1}}
lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-business_ethics": {"acc": 0.29, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394, "acc_stderr": 0.045604802157206845}}, "versions": {"hendrycksTest-business_ethics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_psychology": {"acc": 0.24587155963302754, "acc_norm": 0.23302752293577983, "acc_norm_stderr": 0.018125669180861493, "acc_stderr": 0.018461940968708436}}, "versions": {"hendrycksTest-high_school_psychology": 0}}
lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_standard": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_standard": 0}}
lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 1}}
lm-evaluation-harness/tests/testdata/mutual-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ f759213a28f0412510bf1a24c9cab0dae64bdee902d42a26225295445e7779db
lm-evaluation-harness/tests/testdata/mutual-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mutual": {"mrr": 0.5023513920240772, "mrr_stderr": 0.009501864812936679, "r@1": 0.22573363431151242, "r@1_stderr": 0.014053085820407457, "r@2": 0.4221218961625282, "r@2_stderr": 0.016602191705517556}}, "versions": {"mutual": 0}}
lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"openbookqa": {"acc": 0.214, "acc_norm": 0.276, "acc_norm_stderr": 0.020011219298073517, "acc_stderr": 0.018359797502387046}}, "versions": {"openbookqa": 0}}
lm-evaluation-harness/tests/testdata/pile_books3-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_books3": {"bits_per_byte": 1.2901280503011222e-06, "byte_perplexity": 1.0000008942490204, "word_perplexity": 1.0000052870063607}}, "versions": {"pile_books3": 1}}
lm-evaluation-harness/tests/testdata/pile_enron-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_enron": {"bits_per_byte": 0.0003163902828673244, "byte_perplexity": 1.000316440339552, "word_perplexity": 1.00224668051869}}, "versions": {"pile_enron": 0}}
lm-evaluation-harness/tests/testdata/pile_enron-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_enron": {"bits_per_byte": 0.0004564546920781453, "byte_perplexity": 1.000316440339552, "word_perplexity": 1.00224668051869}}, "versions": {"pile_enron": 1}}
lm-evaluation-harness/tests/testdata/pile_github-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ df384c3df3d8f53273e97127c5bb84c17e638acad7d6bc9c91f6dee96d43b639
lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 0f1c23a1f4ddec0c2b1ff34de8d1505b0eb9e2868d8edbcc1b6de13d02f32036
lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_opensubtitles": {"bits_per_byte": 1.5213441136639177e-05, "byte_perplexity": 1.0000152135568616, "word_perplexity": 1.0000856162053249}}, "versions": {"pile_opensubtitles": 0}}
lm-evaluation-harness/tests/testdata/pile_philpapers-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_philpapers": {"bits_per_byte": 6.241575895982095e-06, "byte_perplexity": 1.0000062415953748, "word_perplexity": 1.0000409888564146}}, "versions": {"pile_philpapers": 0}}
lm-evaluation-harness/tests/testdata/random_insertion-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"random_insertion": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"random_insertion": 0}}
lm-evaluation-harness/tests/testdata/triviaqa-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"triviaqa": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"triviaqa": 1}}
lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"truthfulqa_gen": {"bleu_acc": 0.0, "bleu_acc_stderr": 0.0, "bleu_diff": 0.0, "bleu_diff_stderr": 0.0, "bleu_max": 0.0, "bleu_max_stderr": 0.0, "bleurt_acc": 0.8372093023255814, "bleurt_acc_stderr": 0.012923696051772253, "bleurt_diff": 0.13967358205134603, "bleurt_diff_stderr": 0.00532907098769571, "bleurt_max": -1.4402793981454072, "bleurt_max_stderr": 0.0021884846359458963, "rouge1_acc": 0.0, "rouge1_acc_stderr": 0.0, "rouge1_diff": 0.0, "rouge1_diff_stderr": 0.0, "rouge1_max": 0.0, "rouge1_max_stderr": 0.0, "rouge2_acc": 0.0, "rouge2_acc_stderr": 0.0, "rouge2_diff": 0.0, "rouge2_diff_stderr": 0.0, "rouge2_max": 0.0, "rouge2_max_stderr": 0.0, "rougeL_acc": 0.0, "rougeL_acc_stderr": 0.0, "rougeL_diff": 0.0, "rougeL_diff_stderr": 0.0, "rougeL_max": 0.0, "rougeL_max_stderr": 0.0}}, "versions": {"truthfulqa_gen": 0}}
lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-loglikelihood ADDED
@@ -0,0 +1 @@
+ 1e07020e9cf41d46ed65312eb39d2b8e6599673d4f0d6b67c0d0eba0efb493bb
lm-evaluation-harness/tests/testdata/winogrande-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 90a3eff49de9173964d46f5ed57bcf9a78a72dd1bfe0e5323b25cebb40b49ea9
lm-evaluation-harness/tests/testdata/wsc273-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 26450d414c4581feb51a09882080e7a9b95882e7eab47b1751a4a6024b5a60ee
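Each of the lm-evaluation-harness testdata fixtures above is a single line: the *-loglikelihood files appear to hold a SHA-256-style digest used to check that request construction is deterministic, while the *-res.json files hold the expected metrics keyed by task name. A minimal sketch of reading one of the result fixtures back (the file name is taken from this commit; the structure mirrors the JSON blobs shown above):

import json

# Load an expected-results fixture and print its accuracy-style metrics.
path = "lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json"
with open(path) as f:
    expected = json.load(f)

for task, metrics in expected["results"].items():
    print(task, metrics.get("acc"), metrics.get("acc_norm"))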
venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png ADDED

Git LFS Details

  • SHA256: 115d0b9b81ed40f93fe9e216b4f6384cf71093e3bbb64a5d648b8b9858c645a0
  • Pointer size: 129 Bytes
  • Size of remote file: 6.86 kB
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py ADDED
@@ -0,0 +1,67 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_sentencepiece_available,
+     is_tokenizers_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
+
+ if TYPE_CHECKING:
+     from .processing_layoutxlm import LayoutXLMProcessor
+
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_layoutxlm import LayoutXLMTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
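The __init__.py added above registers LayoutXLM with transformers' lazy-import machinery: at import time the package module is swapped for a _LazyModule, and the tokenizer submodules are only loaded on first attribute access (and only if sentencepiece / tokenizers are installed). A small sketch of what that means in practice, assuming both optional dependencies are available:

from transformers import LayoutXLMProcessor, LayoutXLMTokenizerFast

# Touching these names makes _LazyModule import the corresponding submodule
# on demand; nothing under models/layoutxlm is loaded before that.
print(LayoutXLMProcessor.__module__)      # transformers.models.layoutxlm.processing_layoutxlm
print(LayoutXLMTokenizerFast.__module__)  # transformers.models.layoutxlm.tokenization_layoutxlm_fast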
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB).
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc ADDED
Binary file (7.27 kB).
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc ADDED
Binary file (39 kB).
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc ADDED
Binary file (27 kB).
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py ADDED
@@ -0,0 +1,200 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Processor class for LayoutXLM.
+ """
+ import warnings
+ from typing import List, Optional, Union
+
+ from ...processing_utils import ProcessorMixin
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+ from ...utils import TensorType
+
+
+ class LayoutXLMProcessor(ProcessorMixin):
+     r"""
+     Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
+     processor.
+
+     [`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
+
+     It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
+     get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
+     [`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
+     `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
+     into token-level `labels` for token classification tasks (such as FUNSD, CORD).
+
+     Args:
+         image_processor (`LayoutLMv2ImageProcessor`, *optional*):
+             An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
+         tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
+             An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
+     """
+
+     attributes = ["image_processor", "tokenizer"]
+     image_processor_class = "LayoutLMv2ImageProcessor"
+     tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
+
+     def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+         if "feature_extractor" in kwargs:
+             warnings.warn(
+                 "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+                 " instead.",
+                 FutureWarning,
+             )
+             feature_extractor = kwargs.pop("feature_extractor")
+
+         image_processor = image_processor if image_processor is not None else feature_extractor
+         if image_processor is None:
+             raise ValueError("You need to specify an `image_processor`.")
+         if tokenizer is None:
+             raise ValueError("You need to specify a `tokenizer`.")
+
+         super().__init__(image_processor, tokenizer)
+
+     def __call__(
+         self,
+         images,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+         text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+         boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+         word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+         add_special_tokens: bool = True,
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Union[bool, str, TruncationStrategy] = None,
+         max_length: Optional[int] = None,
+         stride: int = 0,
+         pad_to_multiple_of: Optional[int] = None,
+         return_token_type_ids: Optional[bool] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_overflowing_tokens: bool = False,
+         return_special_tokens_mask: bool = False,
+         return_offsets_mapping: bool = False,
+         return_length: bool = False,
+         verbose: bool = True,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         **kwargs,
+     ) -> BatchEncoding:
+         """
+         This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
+         [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
+         bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
+         together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
+         `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
+         arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
+
+         Please refer to the docstring of the above two methods for more information.
+         """
+         # verify input
+         if self.image_processor.apply_ocr and (boxes is not None):
+             raise ValueError(
+                 "You cannot provide bounding boxes "
+                 "if you initialized the image processor with apply_ocr set to True."
+             )
+
+         if self.image_processor.apply_ocr and (word_labels is not None):
+             raise ValueError(
+                 "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
+             )
+
+         if return_overflowing_tokens is True and return_offsets_mapping is False:
+             raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
+
+         # first, apply the image processor
+         features = self.image_processor(images=images, return_tensors=return_tensors)
+
+         # second, apply the tokenizer
+         if text is not None and self.image_processor.apply_ocr and text_pair is None:
+             if isinstance(text, str):
+                 text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
+             text_pair = features["words"]
+
+         encoded_inputs = self.tokenizer(
+             text=text if text is not None else features["words"],
+             text_pair=text_pair if text_pair is not None else None,
+             boxes=boxes if boxes is not None else features["boxes"],
+             word_labels=word_labels,
+             add_special_tokens=add_special_tokens,
+             padding=padding,
+             truncation=truncation,
+             max_length=max_length,
+             stride=stride,
+             pad_to_multiple_of=pad_to_multiple_of,
+             return_token_type_ids=return_token_type_ids,
+             return_attention_mask=return_attention_mask,
+             return_overflowing_tokens=return_overflowing_tokens,
+             return_special_tokens_mask=return_special_tokens_mask,
+             return_offsets_mapping=return_offsets_mapping,
+             return_length=return_length,
+             verbose=verbose,
+             return_tensors=return_tensors,
+             **kwargs,
+         )
+
+         # add pixel values
+         images = features.pop("pixel_values")
+         if return_overflowing_tokens is True:
+             images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
+         encoded_inputs["image"] = images
+
+         return encoded_inputs
+
+     def get_overflowing_images(self, images, overflow_to_sample_mapping):
+         # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
+         images_with_overflow = []
+         for sample_idx in overflow_to_sample_mapping:
+             images_with_overflow.append(images[sample_idx])
+
+         if len(images_with_overflow) != len(overflow_to_sample_mapping):
+             raise ValueError(
+                 "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
+                 f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
+             )
+
+         return images_with_overflow
+
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         return self.tokenizer.batch_decode(*args, **kwargs)
+
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
+         to the docstring of this method for more information.
+         """
+         return self.tokenizer.decode(*args, **kwargs)
+
+     @property
+     def model_input_names(self):
+         return ["input_ids", "bbox", "attention_mask", "image"]
+
+     @property
+     def feature_extractor_class(self):
+         warnings.warn(
+             "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+             FutureWarning,
+         )
+         return self.image_processor_class
+
+     @property
+     def feature_extractor(self):
+         warnings.warn(
+             "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+             FutureWarning,
+         )
+         return self.image_processor
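For context, the processor added above pairs a LayoutLMv2ImageProcessor with a LayoutXLM tokenizer. A minimal usage sketch, assuming the "microsoft/layoutxlm-base" checkpoint and a local document scan (both are illustrative, not part of this diff), with apply_ocr=False so that words and boxes are supplied by the caller rather than by Tesseract:

from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizerFast

# apply_ocr=False: we pass our own words/boxes instead of relying on an OCR engine.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
processor = LayoutXLMProcessor(image_processor, tokenizer)

image = Image.open("document.png").convert("RGB")  # hypothetical scan
words = ["Invoice", "Total:", "42.00"]
boxes = [[82, 50, 240, 90], [100, 300, 190, 330], [200, 300, 280, 330]]  # 0-1000 scale

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # e.g. ['attention_mask', 'bbox', 'image', 'input_ids']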
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py ADDED
@@ -0,0 +1,1170 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PreTokenizedInput,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
34
+ from ..xlm_roberta.tokenization_xlm_roberta import (
35
+ SPIECE_UNDERLINE,
36
+ VOCAB_FILES_NAMES,
37
+ )
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
44
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
45
+ Whether or not to encode the sequences with the special tokens relative to their model.
46
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
47
+ Activates and controls padding. Accepts the following values:
48
+
49
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
50
+ sequence if provided).
51
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
52
+ acceptable input length for the model if that argument is not provided.
53
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
54
+ lengths).
55
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
56
+ Activates and controls truncation. Accepts the following values:
57
+
58
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
59
+ to the maximum acceptable input length for the model if that argument is not provided. This will
60
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
61
+ sequences (or a batch of pairs) is provided.
62
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
63
+ maximum acceptable input length for the model if that argument is not provided. This will only
64
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
65
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
66
+ maximum acceptable input length for the model if that argument is not provided. This will only
67
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
68
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
69
+ greater than the model maximum admissible input size).
70
+ max_length (`int`, *optional*):
71
+ Controls the maximum length to use by one of the truncation/padding parameters.
72
+
73
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
74
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
75
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
76
+ stride (`int`, *optional*, defaults to 0):
77
+ If set to a number along with `max_length`, the overflowing tokens returned when
78
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
79
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
80
+ argument defines the number of overlapping tokens.
81
+ pad_to_multiple_of (`int`, *optional*):
82
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
83
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
84
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
85
+ If set, will return tensors instead of list of python integers. Acceptable values are:
86
+
87
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
88
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
89
+ - `'np'`: Return Numpy `np.ndarray` objects.
90
+ return_token_type_ids (`bool`, *optional*):
91
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
92
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
93
+
94
+ [What are token type IDs?](../glossary#token-type-ids)
95
+ return_attention_mask (`bool`, *optional*):
96
+ Whether to return the attention mask. If left to the default, will return the attention mask according
97
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
98
+
99
+ [What are attention masks?](../glossary#attention-mask)
100
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
101
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
102
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
103
+ of returning overflowing tokens.
104
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
105
+ Whether or not to return special tokens mask information.
106
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to return `(char_start, char_end)` for each token.
108
+
109
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
110
+ Python's tokenizer, this method will raise `NotImplementedError`.
111
+ return_length (`bool`, *optional*, defaults to `False`):
112
+ Whether or not to return the lengths of the encoded inputs.
113
+ verbose (`bool`, *optional*, defaults to `True`):
114
+ Whether or not to print more information and warnings.
115
+ **kwargs: passed to the `self.tokenize()` method
116
+
117
+ Return:
118
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
119
+
120
+ - **input_ids** -- List of token ids to be fed to a model.
121
+
122
+ [What are input IDs?](../glossary#input-ids)
123
+
124
+ - **bbox** -- List of bounding boxes to be fed to a model.
125
+
126
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
127
+ if *"token_type_ids"* is in `self.model_input_names`).
128
+
129
+ [What are token type IDs?](../glossary#token-type-ids)
130
+
131
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
132
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
133
+
134
+ [What are attention masks?](../glossary#attention-mask)
135
+
136
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
137
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
138
+ `return_overflowing_tokens=True`).
139
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
140
+ `return_overflowing_tokens=True`).
141
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
142
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
143
+ - **length** -- The length of the inputs (when `return_length=True`).
144
+ """
145
+
146
+
147
+ class LayoutXLMTokenizer(PreTrainedTokenizer):
148
+ """
149
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
150
+ [SentencePiece](https://github.com/google/sentencepiece).
151
+
152
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
153
+ this superclass for more information regarding those methods.
154
+
155
+ Args:
156
+ vocab_file (`str`):
157
+ Path to the vocabulary file.
158
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
159
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
160
+
161
+ <Tip>
162
+
163
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
164
+ sequence. The token used is the `cls_token`.
165
+
166
+ </Tip>
167
+
168
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
169
+ The end of sequence token.
170
+
171
+ <Tip>
172
+
173
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
174
+ The token used is the `sep_token`.
175
+
176
+ </Tip>
177
+
178
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
179
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
180
+ sequence classification or for a text and a question for question answering. It is also used as the last
181
+ token of a sequence built with special tokens.
182
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
183
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
184
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
185
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
186
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
187
+ token instead.
188
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
189
+ The token used for padding, for example when batching sequences of different lengths.
190
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
191
+ The token used for masking values. This is the token used when training this model with masked language
192
+ modeling. This is the token which the model will try to predict.
193
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
194
+ The bounding box to use for the special [CLS] token.
195
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
196
+ The bounding box to use for the special [SEP] token.
197
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
198
+ The bounding box to use for the special [PAD] token.
199
+ pad_token_label (`int`, *optional*, defaults to -100):
200
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
201
+ CrossEntropyLoss.
202
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
203
+ Whether or not to only label the first subword, in case word labels are provided.
204
+ sp_model_kwargs (`dict`, *optional*):
205
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
206
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
207
+ to set:
208
+
209
+ - `enable_sampling`: Enable subword regularization.
210
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
211
+
212
+ - `nbest_size = {0,1}`: No sampling is performed.
213
+ - `nbest_size > 1`: samples from the nbest_size results.
214
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
215
+ using forward-filtering-and-backward-sampling algorithm.
216
+
217
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
218
+ BPE-dropout.
219
+
220
+ Attributes:
221
+ sp_model (`SentencePieceProcessor`):
222
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
223
+ """
224
+
225
+ vocab_files_names = VOCAB_FILES_NAMES
226
+ model_input_names = ["input_ids", "attention_mask"]
227
+
228
+ def __init__(
229
+ self,
230
+ vocab_file,
231
+ bos_token="<s>",
232
+ eos_token="</s>",
233
+ sep_token="</s>",
234
+ cls_token="<s>",
235
+ unk_token="<unk>",
236
+ pad_token="<pad>",
237
+ mask_token="<mask>",
238
+ cls_token_box=[0, 0, 0, 0],
239
+ sep_token_box=[1000, 1000, 1000, 1000],
240
+ pad_token_box=[0, 0, 0, 0],
241
+ pad_token_label=-100,
242
+ only_label_first_subword=True,
243
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
244
+ **kwargs,
245
+ ) -> None:
246
+ # Mask token behave like a normal word, i.e. include the space before it
247
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
248
+
249
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
250
+
251
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
252
+ self.sp_model.Load(str(vocab_file))
253
+ self.vocab_file = vocab_file
254
+
255
+ # Original fairseq vocab and spm vocab must be "aligned":
256
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
257
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
258
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
259
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
260
+
261
+ # Mimic fairseq token-to-id alignment for the first 4 token
262
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
263
+
264
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
265
+ self.fairseq_offset = 1
266
+
267
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
268
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
269
+
270
+ # additional properties
271
+ self.cls_token_box = cls_token_box
272
+ self.sep_token_box = sep_token_box
273
+ self.pad_token_box = pad_token_box
274
+ self.pad_token_label = pad_token_label
275
+ self.only_label_first_subword = only_label_first_subword
276
+
277
+ super().__init__(
278
+ bos_token=bos_token,
279
+ eos_token=eos_token,
280
+ unk_token=unk_token,
281
+ sep_token=sep_token,
282
+ cls_token=cls_token,
283
+ pad_token=pad_token,
284
+ mask_token=mask_token,
285
+ cls_token_box=cls_token_box,
286
+ sep_token_box=sep_token_box,
287
+ pad_token_box=pad_token_box,
288
+ pad_token_label=pad_token_label,
289
+ only_label_first_subword=only_label_first_subword,
290
+ sp_model_kwargs=self.sp_model_kwargs,
291
+ **kwargs,
292
+ )
293
+
294
+ def __getstate__(self):
295
+ state = self.__dict__.copy()
296
+ state["sp_model"] = None
297
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
298
+ return state
299
+
300
+ def __setstate__(self, d):
301
+ self.__dict__ = d
302
+
303
+ # for backward compatibility
304
+ if not hasattr(self, "sp_model_kwargs"):
305
+ self.sp_model_kwargs = {}
306
+
307
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
308
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
309
+
310
+ def build_inputs_with_special_tokens(
311
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
312
+ ) -> List[int]:
313
+ """
314
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
315
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
316
+
317
+ - single sequence: `<s> X </s>`
318
+ - pair of sequences: `<s> A </s></s> B </s>`
319
+
320
+ Args:
321
+ token_ids_0 (`List[int]`):
322
+ List of IDs to which the special tokens will be added.
323
+ token_ids_1 (`List[int]`, *optional*):
324
+ Optional second list of IDs for sequence pairs.
325
+
326
+ Returns:
327
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
328
+ """
329
+
330
+ if token_ids_1 is None:
331
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
332
+ cls = [self.cls_token_id]
333
+ sep = [self.sep_token_id]
334
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
335
+
336
+ def get_special_tokens_mask(
337
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
338
+ ) -> List[int]:
339
+ """
340
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
341
+ special tokens using the tokenizer `prepare_for_model` method.
342
+
343
+ Args:
344
+ token_ids_0 (`List[int]`):
345
+ List of IDs.
346
+ token_ids_1 (`List[int]`, *optional*):
347
+ Optional second list of IDs for sequence pairs.
348
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
349
+ Whether or not the token list is already formatted with special tokens for the model.
350
+
351
+ Returns:
352
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
353
+ """
354
+
355
+ if already_has_special_tokens:
356
+ return super().get_special_tokens_mask(
357
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
358
+ )
359
+
360
+ if token_ids_1 is None:
361
+ return [1] + ([0] * len(token_ids_0)) + [1]
362
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
363
+
364
+ def create_token_type_ids_from_sequences(
365
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
366
+ ) -> List[int]:
367
+ """
368
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
369
+ not make use of token type ids, therefore a list of zeros is returned.
370
+
371
+ Args:
372
+ token_ids_0 (`List[int]`):
373
+ List of IDs.
374
+ token_ids_1 (`List[int]`, *optional*):
375
+ Optional second list of IDs for sequence pairs.
376
+
377
+ Returns:
378
+ `List[int]`: List of zeros.
379
+
380
+ """
381
+
382
+ sep = [self.sep_token_id]
383
+ cls = [self.cls_token_id]
384
+
385
+ if token_ids_1 is None:
386
+ return len(cls + token_ids_0 + sep) * [0]
387
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
388
+
389
+ @property
390
+ def vocab_size(self):
391
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
392
+
393
+ def get_vocab(self):
394
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
395
+ vocab.update(self.added_tokens_encoder)
396
+ return vocab
397
+
398
+ def _tokenize(self, text: str) -> List[str]:
399
+ return self.sp_model.encode(text, out_type=str)
400
+
401
+ def _convert_token_to_id(self, token):
402
+ """Converts a token (str) in an id using the vocab."""
403
+ if token in self.fairseq_tokens_to_ids:
404
+ return self.fairseq_tokens_to_ids[token]
405
+ spm_id = self.sp_model.PieceToId(token)
406
+
407
+ # Need to return unknown token if the SP model returned 0
408
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
409
+
410
+ def _convert_id_to_token(self, index):
411
+ """Converts an index (integer) in a token (str) using the vocab."""
412
+ if index in self.fairseq_ids_to_tokens:
413
+ return self.fairseq_ids_to_tokens[index]
414
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
415
+
416
+ def convert_tokens_to_string(self, tokens):
417
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
418
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
419
+ return out_string
420
+
421
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
422
+ if not os.path.isdir(save_directory):
423
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
424
+ return
425
+ out_vocab_file = os.path.join(
426
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
427
+ )
428
+
429
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
430
+ copyfile(self.vocab_file, out_vocab_file)
431
+ elif not os.path.isfile(self.vocab_file):
432
+ with open(out_vocab_file, "wb") as fi:
433
+ content_spiece_model = self.sp_model.serialized_model_proto()
434
+ fi.write(content_spiece_model)
435
+
436
+ return (out_vocab_file,)
437
+
438
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
439
+ def __call__(
440
+ self,
441
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
442
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
443
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
444
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
445
+ add_special_tokens: bool = True,
446
+ padding: Union[bool, str, PaddingStrategy] = False,
447
+ truncation: Union[bool, str, TruncationStrategy] = None,
448
+ max_length: Optional[int] = None,
449
+ stride: int = 0,
450
+ pad_to_multiple_of: Optional[int] = None,
451
+ return_tensors: Optional[Union[str, TensorType]] = None,
452
+ return_token_type_ids: Optional[bool] = None,
453
+ return_attention_mask: Optional[bool] = None,
454
+ return_overflowing_tokens: bool = False,
455
+ return_special_tokens_mask: bool = False,
456
+ return_offsets_mapping: bool = False,
457
+ return_length: bool = False,
458
+ verbose: bool = True,
459
+ **kwargs,
460
+ ) -> BatchEncoding:
461
+ """
462
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
463
+ sequences with word-level normalized bounding boxes and optional labels.
464
+
465
+ Args:
466
+ text (`str`, `List[str]`, `List[List[str]]`):
467
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
468
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
469
+ words).
470
+ text_pair (`List[str]`, `List[List[str]]`):
471
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
472
+ (pretokenized string).
473
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
474
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
475
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
476
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
477
+ """
478
+
479
+ # Input type checking for clearer error
480
+ def _is_valid_text_input(t):
481
+ if isinstance(t, str):
482
+ # Strings are fine
483
+ return True
484
+ elif isinstance(t, (list, tuple)):
485
+ # List are fine as long as they are...
486
+ if len(t) == 0:
487
+ # ... empty
488
+ return True
489
+ elif isinstance(t[0], str):
490
+ # ... list of strings
491
+ return True
492
+ elif isinstance(t[0], (list, tuple)):
493
+ # ... list with an empty list or with a list of strings
494
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
495
+ else:
496
+ return False
497
+ else:
498
+ return False
499
+
500
+ if text_pair is not None:
501
+ # in case text + text_pair are provided, text = questions, text_pair = words
502
+ if not _is_valid_text_input(text):
503
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
504
+ if not isinstance(text_pair, (list, tuple)):
505
+ raise ValueError(
506
+ "words must of type `List[str]` (single pretokenized example), "
507
+ "or `List[List[str]]` (batch of pretokenized examples)."
508
+ )
509
+ else:
510
+ # in case only text is provided => must be words
511
+ if not isinstance(text, (list, tuple)):
512
+ raise ValueError(
513
+ "Words must of type `List[str]` (single pretokenized example), "
514
+ "or `List[List[str]]` (batch of pretokenized examples)."
515
+ )
516
+
517
+ if text_pair is not None:
518
+ is_batched = isinstance(text, (list, tuple))
519
+ else:
520
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
521
+
522
+ words = text if text_pair is None else text_pair
523
+ if boxes is None:
524
+ raise ValueError("You must provide corresponding bounding boxes")
525
+ if is_batched:
526
+ if len(words) != len(boxes):
527
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
528
+ for words_example, boxes_example in zip(words, boxes):
529
+ if len(words_example) != len(boxes_example):
530
+ raise ValueError("You must provide as many words as there are bounding boxes")
531
+ else:
532
+ if len(words) != len(boxes):
533
+ raise ValueError("You must provide as many words as there are bounding boxes")
534
+
535
+ if is_batched:
536
+ if text_pair is not None and len(text) != len(text_pair):
537
+ raise ValueError(
538
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
539
+ f" {len(text_pair)}."
540
+ )
541
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
542
+ is_pair = bool(text_pair is not None)
543
+ return self.batch_encode_plus(
544
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
545
+ is_pair=is_pair,
546
+ boxes=boxes,
547
+ word_labels=word_labels,
548
+ add_special_tokens=add_special_tokens,
549
+ padding=padding,
550
+ truncation=truncation,
551
+ max_length=max_length,
552
+ stride=stride,
553
+ pad_to_multiple_of=pad_to_multiple_of,
554
+ return_tensors=return_tensors,
555
+ return_token_type_ids=return_token_type_ids,
556
+ return_attention_mask=return_attention_mask,
557
+ return_overflowing_tokens=return_overflowing_tokens,
558
+ return_special_tokens_mask=return_special_tokens_mask,
559
+ return_offsets_mapping=return_offsets_mapping,
560
+ return_length=return_length,
561
+ verbose=verbose,
562
+ **kwargs,
563
+ )
564
+ else:
565
+ return self.encode_plus(
566
+ text=text,
567
+ text_pair=text_pair,
568
+ boxes=boxes,
569
+ word_labels=word_labels,
570
+ add_special_tokens=add_special_tokens,
571
+ padding=padding,
572
+ truncation=truncation,
573
+ max_length=max_length,
574
+ stride=stride,
575
+ pad_to_multiple_of=pad_to_multiple_of,
576
+ return_tensors=return_tensors,
577
+ return_token_type_ids=return_token_type_ids,
578
+ return_attention_mask=return_attention_mask,
579
+ return_overflowing_tokens=return_overflowing_tokens,
580
+ return_special_tokens_mask=return_special_tokens_mask,
581
+ return_offsets_mapping=return_offsets_mapping,
582
+ return_length=return_length,
583
+ verbose=verbose,
584
+ **kwargs,
585
+ )
586
+
587
+ def _batch_encode_plus(
588
+ self,
589
+ batch_text_or_text_pairs: Union[
590
+ List[TextInput],
591
+ List[TextInputPair],
592
+ List[PreTokenizedInput],
593
+ ],
594
+ is_pair: bool = None,
595
+ boxes: Optional[List[List[List[int]]]] = None,
596
+ word_labels: Optional[List[List[int]]] = None,
597
+ add_special_tokens: bool = True,
598
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
599
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
600
+ max_length: Optional[int] = None,
601
+ stride: int = 0,
602
+ pad_to_multiple_of: Optional[int] = None,
603
+ return_tensors: Optional[Union[str, TensorType]] = None,
604
+ return_token_type_ids: Optional[bool] = None,
605
+ return_attention_mask: Optional[bool] = None,
606
+ return_overflowing_tokens: bool = False,
607
+ return_special_tokens_mask: bool = False,
608
+ return_offsets_mapping: bool = False,
609
+ return_length: bool = False,
610
+ verbose: bool = True,
611
+ **kwargs,
612
+ ) -> BatchEncoding:
613
+ if return_offsets_mapping:
614
+ raise NotImplementedError(
615
+ "return_offset_mapping is not available when using Python tokenizers. "
616
+ "To use this feature, change your tokenizer to one deriving from "
617
+ "transformers.PreTrainedTokenizerFast."
618
+ )
619
+
620
+ batch_outputs = self._batch_prepare_for_model(
621
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
622
+ is_pair=is_pair,
623
+ boxes=boxes,
624
+ word_labels=word_labels,
625
+ add_special_tokens=add_special_tokens,
626
+ padding_strategy=padding_strategy,
627
+ truncation_strategy=truncation_strategy,
628
+ max_length=max_length,
629
+ stride=stride,
630
+ pad_to_multiple_of=pad_to_multiple_of,
631
+ return_attention_mask=return_attention_mask,
632
+ return_token_type_ids=return_token_type_ids,
633
+ return_overflowing_tokens=return_overflowing_tokens,
634
+ return_special_tokens_mask=return_special_tokens_mask,
635
+ return_length=return_length,
636
+ return_tensors=return_tensors,
637
+ verbose=verbose,
638
+ )
639
+
640
+ return BatchEncoding(batch_outputs)
641
+
642
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
643
+ def _batch_prepare_for_model(
644
+ self,
645
+ batch_text_or_text_pairs,
646
+ is_pair: Optional[bool] = None,
647
+ boxes: Optional[List[List[int]]] = None,
648
+ word_labels: Optional[List[List[int]]] = None,
649
+ add_special_tokens: bool = True,
650
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
651
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
652
+ max_length: Optional[int] = None,
653
+ stride: int = 0,
654
+ pad_to_multiple_of: Optional[int] = None,
655
+ return_tensors: Optional[str] = None,
656
+ return_token_type_ids: Optional[bool] = None,
657
+ return_attention_mask: Optional[bool] = None,
658
+ return_overflowing_tokens: bool = False,
659
+ return_special_tokens_mask: bool = False,
660
+ return_length: bool = False,
661
+ verbose: bool = True,
662
+ ) -> BatchEncoding:
663
+ """
664
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
665
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
666
+ manages a moving window (with user-defined stride) for overflowing tokens.
667
+
668
+ Args:
669
+ batch_text_or_text_pairs: list of examples (words) or of (text, text_pair) tuples to be prepared
670
+ """
671
+
672
+ batch_outputs = {}
673
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
674
+ batch_text_or_text_pair, boxes_example = example
675
+ outputs = self.prepare_for_model(
676
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
677
+ batch_text_or_text_pair[1] if is_pair else None,
678
+ boxes_example,
679
+ word_labels=word_labels[idx] if word_labels is not None else None,
680
+ add_special_tokens=add_special_tokens,
681
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
682
+ truncation=truncation_strategy.value,
683
+ max_length=max_length,
684
+ stride=stride,
685
+ pad_to_multiple_of=None, # we pad in batch afterward
686
+ return_attention_mask=False, # we pad in batch afterward
687
+ return_token_type_ids=return_token_type_ids,
688
+ return_overflowing_tokens=return_overflowing_tokens,
689
+ return_special_tokens_mask=return_special_tokens_mask,
690
+ return_length=return_length,
691
+ return_tensors=None, # We convert the whole batch to tensors at the end
692
+ prepend_batch_axis=False,
693
+ verbose=verbose,
694
+ )
695
+
696
+ for key, value in outputs.items():
697
+ if key not in batch_outputs:
698
+ batch_outputs[key] = []
699
+ batch_outputs[key].append(value)
700
+
701
+ batch_outputs = self.pad(
702
+ batch_outputs,
703
+ padding=padding_strategy.value,
704
+ max_length=max_length,
705
+ pad_to_multiple_of=pad_to_multiple_of,
706
+ return_attention_mask=return_attention_mask,
707
+ )
708
+
709
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
710
+
711
+ return batch_outputs
712
+
713
+ def _encode_plus(
714
+ self,
715
+ text: Union[TextInput, PreTokenizedInput],
716
+ text_pair: Optional[PreTokenizedInput] = None,
717
+ boxes: Optional[List[List[int]]] = None,
718
+ word_labels: Optional[List[int]] = None,
719
+ add_special_tokens: bool = True,
720
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
721
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
722
+ max_length: Optional[int] = None,
723
+ stride: int = 0,
724
+ pad_to_multiple_of: Optional[int] = None,
725
+ return_tensors: Optional[Union[str, TensorType]] = None,
726
+ return_token_type_ids: Optional[bool] = None,
727
+ return_attention_mask: Optional[bool] = None,
728
+ return_overflowing_tokens: bool = False,
729
+ return_special_tokens_mask: bool = False,
730
+ return_offsets_mapping: bool = False,
731
+ return_length: bool = False,
732
+ verbose: bool = True,
733
+ **kwargs,
734
+ ) -> BatchEncoding:
735
+ if return_offsets_mapping:
736
+ raise NotImplementedError(
737
+ "return_offset_mapping is not available when using Python tokenizers. "
738
+ "To use this feature, change your tokenizer to one deriving from "
739
+ "transformers.PreTrainedTokenizerFast. "
740
+ "More information on available tokenizers at "
741
+ "https://github.com/huggingface/transformers/pull/2674"
742
+ )
743
+
744
+ return self.prepare_for_model(
745
+ text=text,
746
+ text_pair=text_pair,
747
+ boxes=boxes,
748
+ word_labels=word_labels,
749
+ add_special_tokens=add_special_tokens,
750
+ padding=padding_strategy.value,
751
+ truncation=truncation_strategy.value,
752
+ max_length=max_length,
753
+ stride=stride,
754
+ pad_to_multiple_of=pad_to_multiple_of,
755
+ return_tensors=return_tensors,
756
+ prepend_batch_axis=True,
757
+ return_attention_mask=return_attention_mask,
758
+ return_token_type_ids=return_token_type_ids,
759
+ return_overflowing_tokens=return_overflowing_tokens,
760
+ return_special_tokens_mask=return_special_tokens_mask,
761
+ return_length=return_length,
762
+ verbose=verbose,
763
+ )
764
+
765
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
766
+ def prepare_for_model(
767
+ self,
768
+ text: Union[TextInput, PreTokenizedInput],
769
+ text_pair: Optional[PreTokenizedInput] = None,
770
+ boxes: Optional[List[List[int]]] = None,
771
+ word_labels: Optional[List[int]] = None,
772
+ add_special_tokens: bool = True,
773
+ padding: Union[bool, str, PaddingStrategy] = False,
774
+ truncation: Union[bool, str, TruncationStrategy] = None,
775
+ max_length: Optional[int] = None,
776
+ stride: int = 0,
777
+ pad_to_multiple_of: Optional[int] = None,
778
+ return_tensors: Optional[Union[str, TensorType]] = None,
779
+ return_token_type_ids: Optional[bool] = None,
780
+ return_attention_mask: Optional[bool] = None,
781
+ return_overflowing_tokens: bool = False,
782
+ return_special_tokens_mask: bool = False,
783
+ return_offsets_mapping: bool = False,
784
+ return_length: bool = False,
785
+ verbose: bool = True,
786
+ prepend_batch_axis: bool = False,
787
+ **kwargs,
788
+ ) -> BatchEncoding:
789
+ """
790
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
791
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
792
+ (with user defined stride) for overflowing tokens.
793
+
794
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
795
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
796
+ labeled with -100, such that they will be ignored by the loss function.
797
+
798
+ Args:
799
+ text (`str`, `List[str]`, `List[List[str]]`):
800
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
801
+ text_pair (`List[str]`, *optional*):
802
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
803
+ list of list of strings (words of a batch of examples).
804
+ """
805
+
806
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
807
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
808
+ padding=padding,
809
+ truncation=truncation,
810
+ max_length=max_length,
811
+ pad_to_multiple_of=pad_to_multiple_of,
812
+ verbose=verbose,
813
+ **kwargs,
814
+ )
815
+
816
+ tokens = []
817
+ pair_tokens = []
818
+ token_boxes = []
819
+ pair_token_boxes = []
820
+ labels = []
821
+
822
+ if text_pair is None:
823
+ if word_labels is None:
824
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
825
+ for word, box in zip(text, boxes):
826
+ if len(word) < 1: # skip empty words
827
+ continue
828
+ word_tokens = self.tokenize(word)
829
+ tokens.extend(word_tokens)
830
+ token_boxes.extend([box] * len(word_tokens))
831
+ else:
832
+ # CASE 2: token classification (training)
833
+ for word, box, label in zip(text, boxes, word_labels):
834
+ if len(word) < 1: # skip empty words
835
+ continue
836
+ word_tokens = self.tokenize(word)
837
+ tokens.extend(word_tokens)
838
+ token_boxes.extend([box] * len(word_tokens))
839
+ if self.only_label_first_subword:
840
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
841
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
842
+ else:
843
+ labels.extend([label] * len(word_tokens))
844
+ else:
845
+ # CASE 3: document visual question answering (inference)
846
+ # text = question
847
+ # text_pair = words
848
+ tokens = self.tokenize(text)
849
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
850
+
851
+ for word, box in zip(text_pair, boxes):
852
+ if len(word) < 1: # skip empty words
853
+ continue
854
+ word_tokens = self.tokenize(word)
855
+ pair_tokens.extend(word_tokens)
856
+ pair_token_boxes.extend([box] * len(word_tokens))
857
+
858
+ # Create ids + pair_ids
859
+ ids = self.convert_tokens_to_ids(tokens)
860
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
861
+
862
+ # Compute the total size of the returned encodings
863
+ pair = bool(pair_ids is not None)
864
+ len_ids = len(ids)
865
+ len_pair_ids = len(pair_ids) if pair else 0
866
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
867
+
868
+ # Truncation: Handle max sequence length
869
+ overflowing_tokens = []
870
+ overflowing_token_boxes = []
871
+ overflowing_labels = []
872
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
873
+ (
874
+ ids,
875
+ token_boxes,
876
+ pair_ids,
877
+ pair_token_boxes,
878
+ labels,
879
+ overflowing_tokens,
880
+ overflowing_token_boxes,
881
+ overflowing_labels,
882
+ ) = self.truncate_sequences(
883
+ ids,
884
+ token_boxes,
885
+ pair_ids=pair_ids,
886
+ pair_token_boxes=pair_token_boxes,
887
+ labels=labels,
888
+ num_tokens_to_remove=total_len - max_length,
889
+ truncation_strategy=truncation_strategy,
890
+ stride=stride,
891
+ )
892
+
893
+ if return_token_type_ids and not add_special_tokens:
894
+ raise ValueError(
895
+ "Asking to return token_type_ids while setting add_special_tokens to False "
896
+ "results in an undefined behavior. Please set add_special_tokens to True or "
897
+ "set return_token_type_ids to None."
898
+ )
899
+
900
+ # Load from model defaults
901
+ if return_token_type_ids is None:
902
+ return_token_type_ids = "token_type_ids" in self.model_input_names
903
+ if return_attention_mask is None:
904
+ return_attention_mask = "attention_mask" in self.model_input_names
905
+
906
+ encoded_inputs = {}
907
+
908
+ if return_overflowing_tokens:
909
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
910
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
911
+ encoded_inputs["overflowing_labels"] = overflowing_labels
912
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
913
+
914
+ # Add special tokens
915
+ if add_special_tokens:
916
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
917
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
918
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
919
+ if pair_token_boxes:
920
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
921
+ if labels:
922
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
923
+ else:
924
+ sequence = ids + pair_ids if pair else ids
925
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
926
+
927
+ # Build output dictionary
928
+ encoded_inputs["input_ids"] = sequence
929
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
930
+ if return_token_type_ids:
931
+ encoded_inputs["token_type_ids"] = token_type_ids
932
+ if return_special_tokens_mask:
933
+ if add_special_tokens:
934
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
935
+ else:
936
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
937
+
938
+ if labels:
939
+ encoded_inputs["labels"] = labels
940
+
941
+ # Check lengths
942
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
943
+
944
+ # Padding
945
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
946
+ encoded_inputs = self.pad(
947
+ encoded_inputs,
948
+ max_length=max_length,
949
+ padding=padding_strategy.value,
950
+ pad_to_multiple_of=pad_to_multiple_of,
951
+ return_attention_mask=return_attention_mask,
952
+ )
953
+
954
+ if return_length:
955
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
956
+
957
+ batch_outputs = BatchEncoding(
958
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
959
+ )
960
+
961
+ return batch_outputs
962
+
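The `only_label_first_subword` behavior above is easiest to see on a toy example. Below is a minimal illustrative sketch (not library code; the helper name and the subword counts are invented) of how word-level labels expand to token-level labels, with the -100 `pad_token_label` on every non-initial subword.

```python
# Illustrative sketch, not part of the diff: mirrors the only_label_first_subword
# branch of prepare_for_model above. `word_token_counts` stands in for
# len(self.tokenize(word)) per word; pad_token_label defaults to -100.
def expand_word_labels(word_labels, word_token_counts, pad_token_label=-100):
    labels = []
    for label, n in zip(word_labels, word_token_counts):
        # real label on the first subword, ignore-index on the remaining subwords
        labels.extend([label] + [pad_token_label] * (n - 1))
    return labels

# three words split into 1, 3 and 2 subword tokens respectively
assert expand_word_labels([5, 7, 2], [1, 3, 2]) == [5, 7, -100, -100, 2, -100]
```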
963
+ def truncate_sequences(
964
+ self,
965
+ ids: List[int],
966
+ token_boxes: List[List[int]],
967
+ pair_ids: Optional[List[int]] = None,
968
+ pair_token_boxes: Optional[List[List[int]]] = None,
969
+ labels: Optional[List[int]] = None,
970
+ num_tokens_to_remove: int = 0,
971
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
972
+ stride: int = 0,
973
+ ) -> Tuple[List[int], List[int], List[int]]:
974
+ """
975
+ Truncates a sequence pair in-place following the strategy.
976
+
977
+ Args:
978
+ ids (`List[int]`):
979
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
980
+ `convert_tokens_to_ids` methods.
981
+ token_boxes (`List[List[int]]`):
982
+ Bounding boxes of the first sequence.
983
+ pair_ids (`List[int]`, *optional*):
984
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
985
+ and `convert_tokens_to_ids` methods.
986
+ pair_token_boxes (`List[List[int]]`, *optional*):
987
+ Bounding boxes of the second sequence.
988
+ labels (`List[int]`, *optional*):
989
+ Labels of the first sequence (for token classification tasks).
990
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
991
+ Number of tokens to remove using the truncation strategy.
992
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
993
+ The strategy to follow for truncation. Can be:
994
+
995
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
996
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
997
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
998
+ batch of pairs) is provided.
999
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1000
+ maximum acceptable input length for the model if that argument is not provided. This will only
1001
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1002
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1003
+ maximum acceptable input length for the model if that argument is not provided. This will only
1004
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1005
+ - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
1006
+ than the model maximum admissible input size).
1007
+ stride (`int`, *optional*, defaults to 0):
1008
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1009
+ sequence returned. The value of this argument defines the number of additional tokens.
1010
+
1011
+ Returns:
1012
+ An 8-tuple: the truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed by the
1013
+ lists of overflowing tokens, overflowing token boxes and overflowing labels.
1014
+ """
1015
+ if num_tokens_to_remove <= 0:
1016
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1017
+
1018
+ if not isinstance(truncation_strategy, TruncationStrategy):
1019
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1020
+
1021
+ overflowing_tokens = []
1022
+ overflowing_token_boxes = []
1023
+ overflowing_labels = []
1024
+ if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1025
+ for _ in range(num_tokens_to_remove):
1026
+ if pair_ids is None or len(ids) > len(pair_ids):
1027
+ if not overflowing_tokens:
1028
+ window_len = min(len(ids), stride + 1)
1029
+ else:
1030
+ window_len = 1
1031
+ overflowing_tokens.extend(ids[-window_len:])
1032
+ overflowing_token_boxes.extend(token_boxes[-window_len:])
1033
+ overflowing_labels.extend(labels[-window_len:])
1034
+ ids = ids[:-1]
1035
+ token_boxes = token_boxes[:-1]
1036
+ labels = labels[:-1]
1037
+ else:
1038
+ if not overflowing_tokens:
1039
+ window_len = min(len(pair_ids), stride + 1)
1040
+ else:
1041
+ window_len = 1
1042
+ overflowing_tokens.extend(pair_ids[-window_len:])
1043
+ overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
1044
+ pair_ids = pair_ids[:-1]
1045
+ pair_token_boxes = pair_token_boxes[:-1]
1046
+ elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
1047
+ if len(ids) > num_tokens_to_remove:
1048
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1049
+ overflowing_tokens = ids[-window_len:]
1050
+ overflowing_token_boxes = token_boxes[-window_len:]
1051
+ overflowing_labels = labels[-window_len:]
1052
+ ids = ids[:-num_tokens_to_remove]
1053
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1054
+ labels = labels[:-num_tokens_to_remove]
1055
+ else:
1056
+ logger.error(
1057
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1058
+ f"but the first sequence has a length {len(ids)}. "
1059
+ f"Please select another truncation strategy than {truncation_strategy}, "
1060
+ "for instance 'longest_first' or 'only_second'."
1061
+ )
1062
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1063
+ if len(pair_ids) > num_tokens_to_remove:
1064
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1065
+ overflowing_tokens = pair_ids[-window_len:]
1066
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1067
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1068
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1069
+ else:
1070
+ logger.error(
1071
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1072
+ f"but the second sequence has a length {len(pair_ids)}. "
1073
+ f"Please select another truncation strategy than {truncation_strategy}, "
1074
+ "for instance 'longest_first' or 'only_first'."
1075
+ )
1076
+
1077
+ return (
1078
+ ids,
1079
+ token_boxes,
1080
+ pair_ids,
1081
+ pair_token_boxes,
1082
+ labels,
1083
+ overflowing_tokens,
1084
+ overflowing_token_boxes,
1085
+ overflowing_labels,
1086
+ )
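As a quick illustration of the `'longest_first'` strategy handled above, the following sketch (illustrative only, not the library implementation) removes one token at a time from whichever sequence is currently longer, with ties going to the second sequence; the real method additionally carries boxes, labels and the stride-sized overflow window.

```python
# Illustrative sketch of 'longest_first' truncation: drop one token at a time
# from the currently longer sequence (ties go to the second sequence),
# matching the loop in truncate_sequences above but without boxes/labels/stride.
def longest_first(ids, pair_ids, num_tokens_to_remove):
    ids, pair_ids = list(ids), list(pair_ids)
    for _ in range(num_tokens_to_remove):
        if len(ids) > len(pair_ids):
            ids.pop()
        else:
            pair_ids.pop()
    return ids, pair_ids

assert longest_first([1, 2, 3, 4], [9, 8], 3) == ([1, 2], [9])
```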
1087
+
1088
+ def _pad(
1089
+ self,
1090
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1091
+ max_length: Optional[int] = None,
1092
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1093
+ pad_to_multiple_of: Optional[int] = None,
1094
+ return_attention_mask: Optional[bool] = None,
1095
+ ) -> dict:
1096
+ """
1097
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1098
+
1099
+ Args:
1100
+ encoded_inputs:
1101
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1102
+ max_length: maximum length of the returned list and optionally padding length (see below).
1103
+ Will truncate by taking into account the special tokens.
1104
+ padding_strategy: PaddingStrategy to use for padding.
1105
+
1106
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
1107
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1108
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1109
+ The tokenizer padding sides are defined in self.padding_side:
1110
+
1111
+ - 'left': pads on the left of the sequences
1112
+ - 'right': pads on the right of the sequences
1113
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1114
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1115
+ `>= 7.5` (Volta).
1116
+ return_attention_mask:
1117
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1118
+ """
1119
+ # Load from model defaults
1120
+ if return_attention_mask is None:
1121
+ return_attention_mask = "attention_mask" in self.model_input_names
1122
+
1123
+ required_input = encoded_inputs[self.model_input_names[0]]
1124
+
1125
+ if padding_strategy == PaddingStrategy.LONGEST:
1126
+ max_length = len(required_input)
1127
+
1128
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1129
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1130
+
1131
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1132
+
1133
+ # Initialize attention mask if not present.
1134
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1135
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1136
+
1137
+ if needs_to_be_padded:
1138
+ difference = max_length - len(required_input)
1139
+ if self.padding_side == "right":
1140
+ if return_attention_mask:
1141
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1142
+ if "token_type_ids" in encoded_inputs:
1143
+ encoded_inputs["token_type_ids"] = (
1144
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1145
+ )
1146
+ if "bbox" in encoded_inputs:
1147
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1148
+ if "labels" in encoded_inputs:
1149
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1150
+ if "special_tokens_mask" in encoded_inputs:
1151
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1152
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1153
+ elif self.padding_side == "left":
1154
+ if return_attention_mask:
1155
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1156
+ if "token_type_ids" in encoded_inputs:
1157
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1158
+ "token_type_ids"
1159
+ ]
1160
+ if "bbox" in encoded_inputs:
1161
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1162
+ if "labels" in encoded_inputs:
1163
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1164
+ if "special_tokens_mask" in encoded_inputs:
1165
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1166
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1167
+ else:
1168
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1169
+
1170
+ return encoded_inputs
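A minimal usage sketch of the words + boxes + word_labels interface defined in this file follows; the checkpoint name is an assumption, and running it requires the `sentencepiece` package and network access.

```python
from transformers import LayoutXLMTokenizer

# Assumed checkpoint; any LayoutXLM checkpoint with a sentencepiece vocab should work.
tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")

words = ["Hello", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 120, 30]]  # word boxes, normalized to a 0-1000 scale
word_labels = [0, 1]                            # one label per word

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    max_length=16,
    truncation=True,
)
print(encoding["input_ids"])   # padded token ids
print(encoding["bbox"])        # one box per token; cls/sep/pad boxes for special tokens
print(encoding["labels"])      # word label on the first subword, -100 elsewhere
```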
venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py ADDED
@@ -0,0 +1,800 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_base import (
24
+ BatchEncoding,
25
+ EncodedInput,
26
+ PreTokenizedInput,
27
+ TextInput,
28
+ TextInputPair,
29
+ TruncationStrategy,
30
+ )
31
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
32
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
33
+ from ..xlm_roberta.tokenization_xlm_roberta_fast import (
34
+ VOCAB_FILES_NAMES,
35
+ )
36
+
37
+
38
+ if is_sentencepiece_available():
39
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
40
+ else:
41
+ LayoutXLMTokenizer = None
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
47
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
48
+ Whether or not to encode the sequences with the special tokens relative to their model.
49
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
50
+ Activates and controls padding. Accepts the following values:
51
+
52
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
53
+ sequence is provided).
54
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
55
+ acceptable input length for the model if that argument is not provided.
56
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
57
+ lengths).
58
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
59
+ Activates and controls truncation. Accepts the following values:
60
+
61
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
62
+ to the maximum acceptable input length for the model if that argument is not provided. This will
63
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
64
+ sequences (or a batch of pairs) is provided.
65
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
66
+ maximum acceptable input length for the model if that argument is not provided. This will only
67
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
68
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
69
+ maximum acceptable input length for the model if that argument is not provided. This will only
70
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
71
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
72
+ greater than the model maximum admissible input size).
73
+ max_length (`int`, *optional*):
74
+ Controls the maximum length to use by one of the truncation/padding parameters.
75
+
76
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
77
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
78
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
79
+ stride (`int`, *optional*, defaults to 0):
80
+ If set to a number along with `max_length`, the overflowing tokens returned when
81
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
82
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
83
+ argument defines the number of overlapping tokens.
84
+ pad_to_multiple_of (`int`, *optional*):
85
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
86
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
87
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
88
+ If set, will return tensors instead of list of python integers. Acceptable values are:
89
+
90
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
91
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
92
+ - `'np'`: Return Numpy `np.ndarray` objects.
93
+ return_token_type_ids (`bool`, *optional*):
94
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
95
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
96
+
97
+ [What are token type IDs?](../glossary#token-type-ids)
98
+ return_attention_mask (`bool`, *optional*):
99
+ Whether to return the attention mask. If left to the default, will return the attention mask according
100
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
101
+
102
+ [What are attention masks?](../glossary#attention-mask)
103
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
104
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
105
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
106
+ of returning overflowing tokens.
107
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
108
+ Whether or not to return special tokens mask information.
109
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to return `(char_start, char_end)` for each token.
111
+
112
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
113
+ Python's tokenizer, this method will raise `NotImplementedError`.
114
+ return_length (`bool`, *optional*, defaults to `False`):
115
+ Whether or not to return the lengths of the encoded inputs.
116
+ verbose (`bool`, *optional*, defaults to `True`):
117
+ Whether or not to print more information and warnings.
118
+ **kwargs: passed to the `self.tokenize()` method
119
+
120
+ Return:
121
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
122
+
123
+ - **input_ids** -- List of token ids to be fed to a model.
124
+
125
+ [What are input IDs?](../glossary#input-ids)
126
+
127
+ - **bbox** -- List of bounding boxes to be fed to a model.
128
+
129
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
130
+ if *"token_type_ids"* is in `self.model_input_names`).
131
+
132
+ [What are token type IDs?](../glossary#token-type-ids)
133
+
134
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
135
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
136
+
137
+ [What are attention masks?](../glossary#attention-mask)
138
+
139
+ - **labels** -- List of labels to be fed to a model (when `word_labels` is specified).
140
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
141
+ `return_overflowing_tokens=True`).
142
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
143
+ `return_overflowing_tokens=True`).
144
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
145
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
146
+ - **length** -- The length of the inputs (when `return_length=True`).
147
+ """
148
+
149
+
150
+ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
151
+ """
152
+ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
153
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
154
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
155
+
156
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
157
+ refer to this superclass for more information regarding those methods.
158
+
159
+ Args:
160
+ vocab_file (`str`):
161
+ Path to the vocabulary file.
162
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
163
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
164
+
165
+ <Tip>
166
+
167
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
168
+ sequence. The token used is the `cls_token`.
169
+
170
+ </Tip>
171
+
172
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
173
+ The end of sequence token.
174
+
175
+ <Tip>
176
+
177
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
178
+ The token used is the `sep_token`.
179
+
180
+ </Tip>
181
+
182
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
183
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
184
+ sequence classification or for a text and a question for question answering. It is also used as the last
185
+ token of a sequence built with special tokens.
186
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
187
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
188
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
189
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
190
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
191
+ token instead.
192
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
193
+ The token used for padding, for example when batching sequences of different lengths.
194
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
195
+ The token used for masking values. This is the token used when training this model with masked language
196
+ modeling. This is the token which the model will try to predict.
197
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
198
+ The bounding box to use for the special [CLS] token.
199
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
200
+ The bounding box to use for the special [SEP] token.
201
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
202
+ The bounding box to use for the special [PAD] token.
203
+ pad_token_label (`int`, *optional*, defaults to -100):
204
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
205
+ CrossEntropyLoss.
206
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
207
+ Whether or not to only label the first subword, in case word labels are provided.
208
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
209
+ Additional special tokens used by the tokenizer.
210
+ """
211
+
212
+ vocab_files_names = VOCAB_FILES_NAMES
213
+ model_input_names = ["input_ids", "attention_mask"]
214
+ slow_tokenizer_class = LayoutXLMTokenizer
215
+
216
+ def __init__(
217
+ self,
218
+ vocab_file=None,
219
+ tokenizer_file=None,
220
+ bos_token="<s>",
221
+ eos_token="</s>",
222
+ sep_token="</s>",
223
+ cls_token="<s>",
224
+ unk_token="<unk>",
225
+ pad_token="<pad>",
226
+ mask_token="<mask>",
227
+ cls_token_box=[0, 0, 0, 0],
228
+ sep_token_box=[1000, 1000, 1000, 1000],
229
+ pad_token_box=[0, 0, 0, 0],
230
+ pad_token_label=-100,
231
+ only_label_first_subword=True,
232
+ **kwargs,
233
+ ):
234
+ # Mask token behave like a normal word, i.e. include the space before it
235
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
236
+
237
+ super().__init__(
238
+ vocab_file,
239
+ tokenizer_file=tokenizer_file,
240
+ bos_token=bos_token,
241
+ eos_token=eos_token,
242
+ sep_token=sep_token,
243
+ cls_token=cls_token,
244
+ unk_token=unk_token,
245
+ pad_token=pad_token,
246
+ mask_token=mask_token,
247
+ cls_token_box=cls_token_box,
248
+ sep_token_box=sep_token_box,
249
+ pad_token_box=pad_token_box,
250
+ pad_token_label=pad_token_label,
251
+ only_label_first_subword=only_label_first_subword,
252
+ **kwargs,
253
+ )
254
+
255
+ self.vocab_file = vocab_file
256
+
257
+ # additional properties
258
+ self.cls_token_box = cls_token_box
259
+ self.sep_token_box = sep_token_box
260
+ self.pad_token_box = pad_token_box
261
+ self.pad_token_label = pad_token_label
262
+ self.only_label_first_subword = only_label_first_subword
263
+
264
+ @property
265
+ def can_save_slow_tokenizer(self) -> bool:
266
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
267
+
268
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
269
+ def __call__(
270
+ self,
271
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
272
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
273
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
274
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
275
+ add_special_tokens: bool = True,
276
+ padding: Union[bool, str, PaddingStrategy] = False,
277
+ truncation: Union[bool, str, TruncationStrategy] = None,
278
+ max_length: Optional[int] = None,
279
+ stride: int = 0,
280
+ pad_to_multiple_of: Optional[int] = None,
281
+ return_tensors: Optional[Union[str, TensorType]] = None,
282
+ return_token_type_ids: Optional[bool] = None,
283
+ return_attention_mask: Optional[bool] = None,
284
+ return_overflowing_tokens: bool = False,
285
+ return_special_tokens_mask: bool = False,
286
+ return_offsets_mapping: bool = False,
287
+ return_length: bool = False,
288
+ verbose: bool = True,
289
+ **kwargs,
290
+ ) -> BatchEncoding:
291
+ """
292
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
293
+ sequences with word-level normalized bounding boxes and optional labels.
294
+
295
+ Args:
296
+ text (`str`, `List[str]`, `List[List[str]]`):
297
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
298
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
299
+ words).
300
+ text_pair (`List[str]`, `List[List[str]]`):
301
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
302
+ (pretokenized string).
303
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
304
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
305
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
306
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
307
+ """
308
+
309
+ # Input type checking for clearer error
310
+ def _is_valid_text_input(t):
311
+ if isinstance(t, str):
312
+ # Strings are fine
313
+ return True
314
+ elif isinstance(t, (list, tuple)):
315
+ # Lists are fine as long as they are...
316
+ if len(t) == 0:
317
+ # ... empty
318
+ return True
319
+ elif isinstance(t[0], str):
320
+ # ... list of strings
321
+ return True
322
+ elif isinstance(t[0], (list, tuple)):
323
+ # ... list with an empty list or with a list of strings
324
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
325
+ else:
326
+ return False
327
+ else:
328
+ return False
329
+
330
+ if text_pair is not None:
331
+ # in case text + text_pair are provided, text = questions, text_pair = words
332
+ if not _is_valid_text_input(text):
333
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
334
+ if not isinstance(text_pair, (list, tuple)):
335
+ raise ValueError(
336
+ "words must of type `List[str]` (single pretokenized example), "
337
+ "or `List[List[str]]` (batch of pretokenized examples)."
338
+ )
339
+ else:
340
+ # in case only text is provided => must be words
341
+ if not isinstance(text, (list, tuple)):
342
+ raise ValueError(
343
+ "Words must of type `List[str]` (single pretokenized example), "
344
+ "or `List[List[str]]` (batch of pretokenized examples)."
345
+ )
346
+
347
+ if text_pair is not None:
348
+ is_batched = isinstance(text, (list, tuple))
349
+ else:
350
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
351
+
352
+ words = text if text_pair is None else text_pair
353
+ if boxes is None:
354
+ raise ValueError("You must provide corresponding bounding boxes")
355
+ if is_batched:
356
+ if len(words) != len(boxes):
357
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
358
+ for words_example, boxes_example in zip(words, boxes):
359
+ if len(words_example) != len(boxes_example):
360
+ raise ValueError("You must provide as many words as there are bounding boxes")
361
+ else:
362
+ if len(words) != len(boxes):
363
+ raise ValueError("You must provide as many words as there are bounding boxes")
364
+
365
+ if is_batched:
366
+ if text_pair is not None and len(text) != len(text_pair):
367
+ raise ValueError(
368
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
369
+ f" {len(text_pair)}."
370
+ )
371
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
372
+ is_pair = bool(text_pair is not None)
373
+ return self.batch_encode_plus(
374
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
375
+ is_pair=is_pair,
376
+ boxes=boxes,
377
+ word_labels=word_labels,
378
+ add_special_tokens=add_special_tokens,
379
+ padding=padding,
380
+ truncation=truncation,
381
+ max_length=max_length,
382
+ stride=stride,
383
+ pad_to_multiple_of=pad_to_multiple_of,
384
+ return_tensors=return_tensors,
385
+ return_token_type_ids=return_token_type_ids,
386
+ return_attention_mask=return_attention_mask,
387
+ return_overflowing_tokens=return_overflowing_tokens,
388
+ return_special_tokens_mask=return_special_tokens_mask,
389
+ return_offsets_mapping=return_offsets_mapping,
390
+ return_length=return_length,
391
+ verbose=verbose,
392
+ **kwargs,
393
+ )
394
+ else:
395
+ return self.encode_plus(
396
+ text=text,
397
+ text_pair=text_pair,
398
+ boxes=boxes,
399
+ word_labels=word_labels,
400
+ add_special_tokens=add_special_tokens,
401
+ padding=padding,
402
+ truncation=truncation,
403
+ max_length=max_length,
404
+ stride=stride,
405
+ pad_to_multiple_of=pad_to_multiple_of,
406
+ return_tensors=return_tensors,
407
+ return_token_type_ids=return_token_type_ids,
408
+ return_attention_mask=return_attention_mask,
409
+ return_overflowing_tokens=return_overflowing_tokens,
410
+ return_special_tokens_mask=return_special_tokens_mask,
411
+ return_offsets_mapping=return_offsets_mapping,
412
+ return_length=return_length,
413
+ verbose=verbose,
414
+ **kwargs,
415
+ )
416
+
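For the question/words pair path taken by `__call__` above, a minimal usage sketch (the checkpoint name is an assumption):

```python
from transformers import LayoutXLMTokenizerFast

tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")  # assumed checkpoint

question = "What is the total amount?"
words = ["Total:", "11.99"]
boxes = [[425, 310, 479, 330], [485, 310, 540, 330]]

encoding = tokenizer(question, words, boxes=boxes)
print(encoding["input_ids"])   # question tokens, then document word tokens, plus special tokens
print(encoding["bbox"])        # question tokens get pad_token_box; word tokens get their word's box
```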
417
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
418
+ batched_input = [(text, pair)] if pair else [text]
419
+ encodings = self._tokenizer.encode_batch(
420
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
421
+ )
422
+
423
+ return encodings[0].tokens
424
+
425
+ def _batch_encode_plus(
426
+ self,
427
+ batch_text_or_text_pairs: Union[
428
+ List[TextInput],
429
+ List[TextInputPair],
430
+ List[PreTokenizedInput],
431
+ ],
432
+ is_pair: Optional[bool] = None,
433
+ boxes: Optional[List[List[List[int]]]] = None,
434
+ word_labels: Optional[List[List[int]]] = None,
435
+ add_special_tokens: bool = True,
436
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
437
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
438
+ max_length: Optional[int] = None,
439
+ stride: int = 0,
440
+ pad_to_multiple_of: Optional[int] = None,
441
+ return_tensors: Optional[str] = None,
442
+ return_token_type_ids: Optional[bool] = None,
443
+ return_attention_mask: Optional[bool] = None,
444
+ return_overflowing_tokens: bool = False,
445
+ return_special_tokens_mask: bool = False,
446
+ return_offsets_mapping: bool = False,
447
+ return_length: bool = False,
448
+ verbose: bool = True,
449
+ **kwargs,
450
+ ) -> BatchEncoding:
451
+ if not isinstance(batch_text_or_text_pairs, list):
452
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
453
+
454
+ # Set the truncation and padding strategy and restore the initial configuration
455
+ self.set_truncation_and_padding(
456
+ padding_strategy=padding_strategy,
457
+ truncation_strategy=truncation_strategy,
458
+ max_length=max_length,
459
+ stride=stride,
460
+ pad_to_multiple_of=pad_to_multiple_of,
461
+ )
462
+
463
+ if is_pair:
464
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
465
+
466
+ encodings = self._tokenizer.encode_batch(
467
+ batch_text_or_text_pairs,
468
+ add_special_tokens=add_special_tokens,
469
+ is_pretokenized=True, # we set this to True as LayoutXLM always expects pretokenized inputs
470
+ )
471
+
472
+ # Convert encoding to dict
473
+ # `Tokens` has type: Tuple[
474
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
475
+ # List[EncodingFast]
476
+ # ]
477
+ # with nested dimensions corresponding to batch, overflows, sequence length
478
+ tokens_and_encodings = [
479
+ self._convert_encoding(
480
+ encoding=encoding,
481
+ return_token_type_ids=return_token_type_ids,
482
+ return_attention_mask=return_attention_mask,
483
+ return_overflowing_tokens=return_overflowing_tokens,
484
+ return_special_tokens_mask=return_special_tokens_mask,
485
+ return_offsets_mapping=True
486
+ if word_labels is not None
487
+ else return_offsets_mapping, # we use offsets to create the labels
488
+ return_length=return_length,
489
+ verbose=verbose,
490
+ )
491
+ for encoding in encodings
492
+ ]
493
+
494
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
495
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
496
+ # (we say ~ because the number of overflow varies with the example in the batch)
497
+ #
498
+ # To match each overflowing sample with the original sample in the batch
499
+ # we add an overflow_to_sample_mapping array (see below)
500
+ sanitized_tokens = {}
501
+ for key in tokens_and_encodings[0][0].keys():
502
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
503
+ sanitized_tokens[key] = stack
504
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
505
+
506
+ # If returning overflowing tokens, we need to return a mapping
507
+ # from the batch idx to the original sample
508
+ if return_overflowing_tokens:
509
+ overflow_to_sample_mapping = []
510
+ for i, (toks, _) in enumerate(tokens_and_encodings):
511
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
512
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
513
+
514
+ for input_ids in sanitized_tokens["input_ids"]:
515
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
516
+
517
+ # create the token boxes
518
+ token_boxes = []
519
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
520
+ if return_overflowing_tokens:
521
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
522
+ else:
523
+ original_index = batch_index
524
+ token_boxes_example = []
525
+ for id, sequence_id, word_id in zip(
526
+ sanitized_tokens["input_ids"][batch_index],
527
+ sanitized_encodings[batch_index].sequence_ids,
528
+ sanitized_encodings[batch_index].word_ids,
529
+ ):
530
+ if word_id is not None:
531
+ if is_pair and sequence_id == 0:
532
+ token_boxes_example.append(self.pad_token_box)
533
+ else:
534
+ token_boxes_example.append(boxes[original_index][word_id])
535
+ else:
536
+ if id == self.cls_token_id:
537
+ token_boxes_example.append(self.cls_token_box)
538
+ elif id == self.sep_token_id:
539
+ token_boxes_example.append(self.sep_token_box)
540
+ elif id == self.pad_token_id:
541
+ token_boxes_example.append(self.pad_token_box)
542
+ else:
543
+ raise ValueError("Id not recognized")
544
+ token_boxes.append(token_boxes_example)
545
+
546
+ sanitized_tokens["bbox"] = token_boxes
547
+
548
+ # optionally, create the labels
549
+ if word_labels is not None:
550
+ labels = []
551
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
552
+ if return_overflowing_tokens:
553
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
554
+ else:
555
+ original_index = batch_index
556
+ labels_example = []
557
+ for id, offset, word_id in zip(
558
+ sanitized_tokens["input_ids"][batch_index],
559
+ sanitized_tokens["offset_mapping"][batch_index],
560
+ sanitized_encodings[batch_index].word_ids,
561
+ ):
562
+ if word_id is not None:
563
+ if self.only_label_first_subword:
564
+ if offset[0] == 0:
565
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
566
+ labels_example.append(word_labels[original_index][word_id])
567
+ else:
568
+ labels_example.append(self.pad_token_label)
569
+ else:
570
+ labels_example.append(word_labels[original_index][word_id])
571
+ else:
572
+ labels_example.append(self.pad_token_label)
573
+ labels.append(labels_example)
574
+
575
+ sanitized_tokens["labels"] = labels
576
+ # finally, remove offsets if the user didn't want them
577
+ if not return_offsets_mapping:
578
+ del sanitized_tokens["offset_mapping"]
579
+
580
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
581
+
582
+ def _encode_plus(
583
+ self,
584
+ text: Union[TextInput, PreTokenizedInput],
585
+ text_pair: Optional[PreTokenizedInput] = None,
586
+ boxes: Optional[List[List[int]]] = None,
587
+ word_labels: Optional[List[int]] = None,
588
+ add_special_tokens: bool = True,
589
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
590
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
591
+ max_length: Optional[int] = None,
592
+ stride: int = 0,
593
+ pad_to_multiple_of: Optional[int] = None,
594
+ return_tensors: Optional[Union[str, TensorType]] = None,
595
+ return_token_type_ids: Optional[bool] = None,
596
+ return_attention_mask: Optional[bool] = None,
597
+ return_overflowing_tokens: bool = False,
598
+ return_special_tokens_mask: bool = False,
599
+ return_offsets_mapping: bool = False,
600
+ return_length: bool = False,
601
+ verbose: bool = True,
602
+ **kwargs,
603
+ ) -> BatchEncoding:
604
+ # make it a batched input
605
+ # 2 options:
606
+ # 1) only text, in which case text must be a list of str
607
+ # 2) text + text_pair, in which case text = str and text_pair is a list of str
608
+ batched_input = [(text, text_pair)] if text_pair else [text]
609
+ batched_boxes = [boxes]
610
+ batched_word_labels = [word_labels] if word_labels is not None else None
611
+ batched_output = self._batch_encode_plus(
612
+ batched_input,
613
+ is_pair=bool(text_pair is not None),
614
+ boxes=batched_boxes,
615
+ word_labels=batched_word_labels,
616
+ add_special_tokens=add_special_tokens,
617
+ padding_strategy=padding_strategy,
618
+ truncation_strategy=truncation_strategy,
619
+ max_length=max_length,
620
+ stride=stride,
621
+ pad_to_multiple_of=pad_to_multiple_of,
622
+ return_tensors=return_tensors,
623
+ return_token_type_ids=return_token_type_ids,
624
+ return_attention_mask=return_attention_mask,
625
+ return_overflowing_tokens=return_overflowing_tokens,
626
+ return_special_tokens_mask=return_special_tokens_mask,
627
+ return_offsets_mapping=return_offsets_mapping,
628
+ return_length=return_length,
629
+ verbose=verbose,
630
+ **kwargs,
631
+ )
632
+
633
+ # If return_tensors is None, we can remove the leading batch axis.
634
+ # Overflowing tokens are returned as a batch of outputs, so we keep the batch axis in that case.
635
+ if return_tensors is None and not return_overflowing_tokens:
636
+ batched_output = BatchEncoding(
637
+ {
638
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
639
+ for key, value in batched_output.items()
640
+ },
641
+ batched_output.encodings,
642
+ )
643
+
644
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
645
+
646
+ return batched_output
647
+
648
+ def _pad(
649
+ self,
650
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
651
+ max_length: Optional[int] = None,
652
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
653
+ pad_to_multiple_of: Optional[int] = None,
654
+ return_attention_mask: Optional[bool] = None,
655
+ ) -> dict:
656
+ """
657
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
658
+
659
+ Args:
660
+ encoded_inputs:
661
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
662
+ max_length: maximum length of the returned list and optionally padding length (see below).
663
+ Will truncate by taking into account the special tokens.
664
+ padding_strategy: PaddingStrategy to use for padding.
665
+
666
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
667
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
668
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
669
+ The tokenizer padding sides are defined in self.padding_side:
670
+
671
+ - 'left': pads on the left of the sequences
672
+ - 'right': pads on the right of the sequences
673
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
674
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
675
+ `>= 7.5` (Volta).
676
+ return_attention_mask:
677
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
678
+ """
679
+ # Load from model defaults
680
+ if return_attention_mask is None:
681
+ return_attention_mask = "attention_mask" in self.model_input_names
682
+
683
+ required_input = encoded_inputs[self.model_input_names[0]]
684
+
685
+ if padding_strategy == PaddingStrategy.LONGEST:
686
+ max_length = len(required_input)
687
+
688
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
689
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
690
+
691
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
692
+
693
+ # Initialize attention mask if not present.
694
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
695
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
696
+
697
+ if needs_to_be_padded:
698
+ difference = max_length - len(required_input)
699
+ if self.padding_side == "right":
700
+ if return_attention_mask:
701
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
702
+ if "token_type_ids" in encoded_inputs:
703
+ encoded_inputs["token_type_ids"] = (
704
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
705
+ )
706
+ if "bbox" in encoded_inputs:
707
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
708
+ if "labels" in encoded_inputs:
709
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
710
+ if "special_tokens_mask" in encoded_inputs:
711
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
712
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
713
+ elif self.padding_side == "left":
714
+ if return_attention_mask:
715
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
716
+ if "token_type_ids" in encoded_inputs:
717
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
718
+ "token_type_ids"
719
+ ]
720
+ if "bbox" in encoded_inputs:
721
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
722
+ if "labels" in encoded_inputs:
723
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
724
+ if "special_tokens_mask" in encoded_inputs:
725
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
726
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
727
+ else:
728
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
729
+
730
+ return encoded_inputs
731
+
732
+ def build_inputs_with_special_tokens(
733
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
734
+ ) -> List[int]:
735
+ """
736
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
737
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
738
+
739
+ - single sequence: `<s> X </s>`
740
+ - pair of sequences: `<s> A </s></s> B </s>`
741
+
742
+ Args:
743
+ token_ids_0 (`List[int]`):
744
+ List of IDs to which the special tokens will be added.
745
+ token_ids_1 (`List[int]`, *optional*):
746
+ Optional second list of IDs for sequence pairs.
747
+
748
+ Returns:
749
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
750
+ """
751
+
752
+ if token_ids_1 is None:
753
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
754
+ cls = [self.cls_token_id]
755
+ sep = [self.sep_token_id]
756
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
757
+
758
+ def create_token_type_ids_from_sequences(
759
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
760
+ ) -> List[int]:
761
+ """
762
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
763
+ not make use of token type ids, therefore a list of zeros is returned.
764
+
765
+ Args:
766
+ token_ids_0 (`List[int]`):
767
+ List of IDs.
768
+ token_ids_1 (`List[int]`, *optional*):
769
+ Optional second list of IDs for sequence pairs.
770
+
771
+ Returns:
772
+ `List[int]`: List of zeros.
773
+
774
+ """
775
+
776
+ sep = [self.sep_token_id]
777
+ cls = [self.cls_token_id]
778
+
779
+ if token_ids_1 is None:
780
+ return len(cls + token_ids_0 + sep) * [0]
781
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
782
+
783
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
784
+ if not self.can_save_slow_tokenizer:
785
+ raise ValueError(
786
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
787
+ "tokenizer."
788
+ )
789
+
790
+ if not os.path.isdir(save_directory):
791
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
792
+ return
793
+ out_vocab_file = os.path.join(
794
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
795
+ )
796
+
797
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
798
+ copyfile(self.vocab_file, out_vocab_file)
799
+
800
+ return (out_vocab_file,)
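Note on the overrides above (illustrative only, not part of the uploaded file): LayoutXLM carries extra per-token fields (`bbox`, `labels`) that must be padded in lockstep with `input_ids`, and it reuses the XLM-RoBERTa special-token layout (`<s> X </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair). The minimal standalone sketch below shows both behaviours with toy values; `CLS_ID`, `SEP_ID`, `PAD_TOKEN_ID`, `PAD_TOKEN_BOX` and `PAD_TOKEN_LABEL` are assumed illustration constants, not read from this file.

    from typing import Any, Dict, List

    CLS_ID, SEP_ID = 0, 2            # assumed <s> and </s> ids, for illustration only
    PAD_TOKEN_ID = 1                 # assumed pad id
    PAD_TOKEN_BOX = [0, 0, 0, 0]     # assumed pad bounding box
    PAD_TOKEN_LABEL = -100           # assumed "ignore" label for loss masking

    def build_pair(ids_a: List[int], ids_b: List[int]) -> List[int]:
        # Mirrors build_inputs_with_special_tokens: <s> A </s></s> B </s>
        return [CLS_ID] + ids_a + [SEP_ID, SEP_ID] + ids_b + [SEP_ID]

    def pad_right(encoded: Dict[str, List[Any]], max_length: int) -> Dict[str, List[Any]]:
        # Mirrors the padding_side == "right" branch of _pad: all aligned fields grow together.
        difference = max_length - len(encoded["input_ids"])
        if difference <= 0:
            return encoded
        encoded["attention_mask"] += [0] * difference
        encoded["bbox"] += [PAD_TOKEN_BOX] * difference
        encoded["labels"] += [PAD_TOKEN_LABEL] * difference
        encoded["input_ids"] += [PAD_TOKEN_ID] * difference
        return encoded

    example = {
        "input_ids": [CLS_ID, 150, 151, SEP_ID],
        "attention_mask": [1, 1, 1, 1],
        "bbox": [[0, 0, 0, 0], [10, 10, 20, 20], [30, 10, 40, 20], [0, 0, 0, 0]],
        "labels": [PAD_TOKEN_LABEL, 3, 5, PAD_TOKEN_LABEL],
    }
    print(build_pair([150, 151], [160]))                   # [0, 150, 151, 2, 2, 160, 2]
    print(pad_right(example, max_length=6)["input_ids"])   # [0, 150, 151, 2, 1, 1]

In the real class the same logic runs inside the tokenizer itself, with the pad values taken from its `pad_token_id`, `pad_token_box` and `pad_token_label` attributes as shown in `_pad` above.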
venv/lib/python3.10/site-packages/transformers/models/mbart/__init__.py ADDED
@@ -0,0 +1,148 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_sentencepiece_available,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_mbart"] = [
+         "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "MBartForCausalLM",
+         "MBartForConditionalGeneration",
+         "MBartForQuestionAnswering",
+         "MBartForSequenceClassification",
+         "MBartModel",
+         "MBartPreTrainedModel",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_mbart"] = [
+         "TFMBartForConditionalGeneration",
+         "TFMBartModel",
+         "TFMBartPreTrainedModel",
+     ]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_mbart"] = [
+         "FlaxMBartForConditionalGeneration",
+         "FlaxMBartForQuestionAnswering",
+         "FlaxMBartForSequenceClassification",
+         "FlaxMBartModel",
+         "FlaxMBartPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
+
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_mbart import MBartTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_mbart_fast import MBartTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_mbart import (
+             MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
+             MBartForCausalLM,
+             MBartForConditionalGeneration,
+             MBartForQuestionAnswering,
+             MBartForSequenceClassification,
+             MBartModel,
+             MBartPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_mbart import (
+             FlaxMBartForConditionalGeneration,
+             FlaxMBartForQuestionAnswering,
+             FlaxMBartForSequenceClassification,
+             FlaxMBartModel,
+             FlaxMBartPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
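The file above follows the transformers lazy-import pattern: `_import_structure` maps each submodule to its public names, optional backends (sentencepiece, tokenizers, torch, TensorFlow, Flax) are probed via `OptionalDependencyNotAvailable`, and at runtime the module is replaced by a `_LazyModule` so nothing heavy is imported until an attribute is first accessed. Below is a minimal sketch of that idea using a hypothetical `LazySubmodules` class; it is not the real `_LazyModule` implementation.

    import importlib
    import types
    from typing import Dict, List

    class LazySubmodules(types.ModuleType):
        """Imports a submodule only when one of its exported names is first accessed."""

        def __init__(self, name: str, import_structure: Dict[str, List[str]]):
            super().__init__(name)
            # Map each exported name to the submodule that defines it.
            self._name_to_module = {
                attr: module for module, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr: str):
            if attr not in self._name_to_module:
                raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
            submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
            value = getattr(submodule, attr)
            setattr(self, attr, value)  # cache so __getattr__ is not hit again
            return value

    # Usage sketch for a hypothetical package layout:
    # sys.modules[__name__] = LazySubmodules(__name__, {"configuration_mbart": ["MBartConfig"]})

The real `_LazyModule` in `transformers.utils` does the same job with extra bookkeeping (module spec, clearer errors when an optional backend is missing), which is why the file ends by assigning `sys.modules[__name__] = _LazyModule(...)`.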
venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.12 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc ADDED
Binary file (12.1 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.31 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc ADDED
Binary file (48.9 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc ADDED
Binary file (66.1 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc ADDED
Binary file (51.4 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc ADDED
Binary file (12.3 kB)

venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc ADDED
Binary file (9.22 kB)