applied-ai-018 committed
Commit 0ebaab1 · verified · 1 Parent(s): 43538b0

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/data/datasets/glue.py +161 -0
  12. venv/lib/python3.10/site-packages/transformers/data/processors/__init__.py +18 -0
  13. venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/data/processors/glue.py +643 -0
  19. venv/lib/python3.10/site-packages/transformers/data/processors/squad.py +845 -0
  20. venv/lib/python3.10/site-packages/transformers/data/processors/utils.py +349 -0
  21. venv/lib/python3.10/site-packages/transformers/data/processors/xnli.py +97 -0
  22. venv/lib/python3.10/site-packages/transformers/onnx/__init__.py +49 -0
  23. venv/lib/python3.10/site-packages/transformers/onnx/__main__.py +242 -0
  24. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/onnx/config.py +741 -0
  31. venv/lib/python3.10/site-packages/transformers/onnx/convert.py +460 -0
  32. venv/lib/python3.10/site-packages/transformers/onnx/features.py +749 -0
  33. venv/lib/python3.10/site-packages/transformers/onnx/utils.py +109 -0
  34. venv/lib/python3.10/site-packages/transformers/quantizers/__init__.py +15 -0
  35. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/quantizers/auto.py +161 -0
  46. venv/lib/python3.10/site-packages/transformers/quantizers/base.py +213 -0
  47. venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py +98 -0
  48. venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py +124 -0
  49. venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py +317 -0
  50. venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py +285 -0
ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b5aaa4a9b2d2e1b5ceeb53749e127800f86c8aa5e016482a9534a82ca859ff3
+ size 50332828
ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6e8bafe566227b5eaee5a5cd13fa86d2ece4511bd3aa07e9a9f8ff19634301a
+ size 50332843
ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a375f12df0268d460178f978a3df67864885bc7826e1e3cd7efc12c82f39fe3b
+ size 50332749
ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56892653a52e4bcdb41de2d759d8c75f2b8408862fd6b787eeccb121b2dbffa5
+ size 9293
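The four `ckpts/.../*.pt` entries above are Git LFS pointer files: the diff records only the `version`/`oid`/`size` stanza, while the actual optimizer and weight tensors live in LFS storage. As a rough sketch (the `repo_id` below is a placeholder, not taken from this commit), such a shard could be fetched and inspected like this:

```python
# Minimal sketch: resolve an LFS-backed checkpoint shard and peek at it.
# Assumes `huggingface_hub` and `torch` are installed; repo_id is hypothetical.
import torch
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="<namespace>/<repo>",  # placeholder for the actual repository
    filename="ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt",
)
obj = torch.load(local_path, map_location="cpu")  # the LFS pointer resolves to the real 9,293-byte payload
print(type(obj))
```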
venv/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
venv/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc ADDED
Binary file (46.5 kB).
venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (555 Bytes).
venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc ADDED
Binary file (4.87 kB).
venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc ADDED
Binary file (13 kB).
venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc ADDED
Binary file (6.36 kB).
venv/lib/python3.10/site-packages/transformers/data/datasets/glue.py ADDED
@@ -0,0 +1,161 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import time
+ import warnings
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import List, Optional, Union
+
+ import torch
+ from filelock import FileLock
+ from torch.utils.data import Dataset
+
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
+ from ...utils import logging
+ from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
+ from ..processors.utils import InputFeatures
+
+
+ logger = logging.get_logger(__name__)
+
+
+ @dataclass
+ class GlueDataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
+     line.
+     """
+
+     task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
+     data_dir: str = field(
+         metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
+     )
+     max_seq_length: int = field(
+         default=128,
+         metadata={
+             "help": (
+                 "The maximum total input sequence length after tokenization. Sequences longer "
+                 "than this will be truncated, sequences shorter will be padded."
+             )
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+
+     def __post_init__(self):
+         self.task_name = self.task_name.lower()
+
+
+ class Split(Enum):
+     train = "train"
+     dev = "dev"
+     test = "test"
+
+
+ class GlueDataset(Dataset):
+     """
+     This will be superseded by a framework-agnostic approach soon.
+     """
+
+     args: GlueDataTrainingArguments
+     output_mode: str
+     features: List[InputFeatures]
+
+     def __init__(
+         self,
+         args: GlueDataTrainingArguments,
+         tokenizer: PreTrainedTokenizerBase,
+         limit_length: Optional[int] = None,
+         mode: Union[str, Split] = Split.train,
+         cache_dir: Optional[str] = None,
+     ):
+         warnings.warn(
+             "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
+             "library. You can have a look at this example script for pointers: "
+             "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
+             FutureWarning,
+         )
+         self.args = args
+         self.processor = glue_processors[args.task_name]()
+         self.output_mode = glue_output_modes[args.task_name]
+         if isinstance(mode, str):
+             try:
+                 mode = Split[mode]
+             except KeyError:
+                 raise KeyError("mode is not a valid split name")
+         # Load data features from cache or dataset file
+         cached_features_file = os.path.join(
+             cache_dir if cache_dir is not None else args.data_dir,
+             f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
+         )
+         label_list = self.processor.get_labels()
+         if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
+             "RobertaTokenizer",
+             "RobertaTokenizerFast",
+             "XLMRobertaTokenizer",
+             "BartTokenizer",
+             "BartTokenizerFast",
+         ):
+             # HACK(label indices are swapped in RoBERTa pretrained model)
+             label_list[1], label_list[2] = label_list[2], label_list[1]
+         self.label_list = label_list
+
+         # Make sure only the first process in distributed training processes the dataset,
+         # and the others will use the cache.
+         lock_path = cached_features_file + ".lock"
+         with FileLock(lock_path):
+             if os.path.exists(cached_features_file) and not args.overwrite_cache:
+                 start = time.time()
+                 self.features = torch.load(cached_features_file)
+                 logger.info(
+                     f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
+                 )
+             else:
+                 logger.info(f"Creating features from dataset file at {args.data_dir}")
+
+                 if mode == Split.dev:
+                     examples = self.processor.get_dev_examples(args.data_dir)
+                 elif mode == Split.test:
+                     examples = self.processor.get_test_examples(args.data_dir)
+                 else:
+                     examples = self.processor.get_train_examples(args.data_dir)
+                 if limit_length is not None:
+                     examples = examples[:limit_length]
+                 self.features = glue_convert_examples_to_features(
+                     examples,
+                     tokenizer,
+                     max_length=args.max_seq_length,
+                     label_list=label_list,
+                     output_mode=self.output_mode,
+                 )
+                 start = time.time()
+                 torch.save(self.features, cached_features_file)
+                 # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
+                 logger.info(
+                     f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
+                 )
+
+     def __len__(self):
+         return len(self.features)
+
+     def __getitem__(self, i) -> InputFeatures:
+         return self.features[i]
+
+     def get_labels(self):
+         return self.label_list
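`GlueDataset` above wraps the legacy GLUE processors in a PyTorch `Dataset` with on-disk feature caching. A minimal usage sketch, assuming an MRPC-style directory of TSV files (the path and model name are illustrative, not part of this commit):

```python
# Sketch: build the (deprecated) GlueDataset for MRPC and inspect one cached feature.
from transformers import AutoTokenizer
from transformers.data.datasets.glue import GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(
    task_name="mrpc",
    data_dir="/path/to/MRPC",  # hypothetical: must contain train.tsv / dev.tsv
    max_seq_length=128,
)
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")  # emits a FutureWarning
print(len(train_dataset), train_dataset[0].input_ids[:10])
```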
venv/lib/python3.10/site-packages/transformers/data/processors/__init__.py ADDED
@@ -0,0 +1,18 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
+ from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
+ from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
+ from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (751 Bytes).
venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc ADDED
Binary file (17.7 kB).
venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc ADDED
Binary file (20 kB).
venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.2 kB).
venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc ADDED
Binary file (2.53 kB).
venv/lib/python3.10/site-packages/transformers/data/processors/glue.py ADDED
@@ -0,0 +1,643 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ GLUE processors and helpers"""
17
+
18
+ import os
19
+ import warnings
20
+ from dataclasses import asdict
21
+ from enum import Enum
22
+ from typing import List, Optional, Union
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import is_tf_available, logging
26
+ from .utils import DataProcessor, InputExample, InputFeatures
27
+
28
+
29
+ if is_tf_available():
30
+ import tensorflow as tf
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ DEPRECATION_WARNING = (
35
+ "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
36
+ "library. You can have a look at this example script for pointers: "
37
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
38
+ )
39
+
40
+
41
+ def glue_convert_examples_to_features(
42
+ examples: Union[List[InputExample], "tf.data.Dataset"],
43
+ tokenizer: PreTrainedTokenizer,
44
+ max_length: Optional[int] = None,
45
+ task=None,
46
+ label_list=None,
47
+ output_mode=None,
48
+ ):
49
+ """
50
+ Loads a data file into a list of `InputFeatures`
51
+
52
+ Args:
53
+ examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
54
+ tokenizer: Instance of a tokenizer that will tokenize the examples
55
+ max_length: Maximum example length. Defaults to the tokenizer's max_len
56
+ task: GLUE task
57
+ label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
58
+ output_mode: String indicating the output mode. Either `regression` or `classification`
59
+
60
+ Returns:
61
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific
62
+ features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which
63
+ can be fed to the model.
64
+
65
+ """
66
+ warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
67
+ if is_tf_available() and isinstance(examples, tf.data.Dataset):
68
+ if task is None:
69
+ raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
70
+ return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
71
+ return _glue_convert_examples_to_features(
72
+ examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
73
+ )
74
+
75
+
76
+ if is_tf_available():
77
+
78
+ def _tf_glue_convert_examples_to_features(
79
+ examples: tf.data.Dataset,
80
+ tokenizer: PreTrainedTokenizer,
81
+ task=str,
82
+ max_length: Optional[int] = None,
83
+ ) -> tf.data.Dataset:
84
+ """
85
+ Returns:
86
+ A `tf.data.Dataset` containing the task-specific features.
87
+
88
+ """
89
+ processor = glue_processors[task]()
90
+ examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
91
+ features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
92
+ label_type = tf.float32 if task == "sts-b" else tf.int64
93
+
94
+ def gen():
95
+ for ex in features:
96
+ d = {k: v for k, v in asdict(ex).items() if v is not None}
97
+ label = d.pop("label")
98
+ yield (d, label)
99
+
100
+ input_names = tokenizer.model_input_names
101
+
102
+ return tf.data.Dataset.from_generator(
103
+ gen,
104
+ ({k: tf.int32 for k in input_names}, label_type),
105
+ ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
106
+ )
107
+
108
+
109
+ def _glue_convert_examples_to_features(
110
+ examples: List[InputExample],
111
+ tokenizer: PreTrainedTokenizer,
112
+ max_length: Optional[int] = None,
113
+ task=None,
114
+ label_list=None,
115
+ output_mode=None,
116
+ ):
117
+ if max_length is None:
118
+ max_length = tokenizer.model_max_length
119
+
120
+ if task is not None:
121
+ processor = glue_processors[task]()
122
+ if label_list is None:
123
+ label_list = processor.get_labels()
124
+ logger.info(f"Using label list {label_list} for task {task}")
125
+ if output_mode is None:
126
+ output_mode = glue_output_modes[task]
127
+ logger.info(f"Using output mode {output_mode} for task {task}")
128
+
129
+ label_map = {label: i for i, label in enumerate(label_list)}
130
+
131
+ def label_from_example(example: InputExample) -> Union[int, float, None]:
132
+ if example.label is None:
133
+ return None
134
+ if output_mode == "classification":
135
+ return label_map[example.label]
136
+ elif output_mode == "regression":
137
+ return float(example.label)
138
+ raise KeyError(output_mode)
139
+
140
+ labels = [label_from_example(example) for example in examples]
141
+
142
+ batch_encoding = tokenizer(
143
+ [(example.text_a, example.text_b) for example in examples],
144
+ max_length=max_length,
145
+ padding="max_length",
146
+ truncation=True,
147
+ )
148
+
149
+ features = []
150
+ for i in range(len(examples)):
151
+ inputs = {k: batch_encoding[k][i] for k in batch_encoding}
152
+
153
+ feature = InputFeatures(**inputs, label=labels[i])
154
+ features.append(feature)
155
+
156
+ for i, example in enumerate(examples[:5]):
157
+ logger.info("*** Example ***")
158
+ logger.info(f"guid: {example.guid}")
159
+ logger.info(f"features: {features[i]}")
160
+
161
+ return features
162
+
163
+
164
+ class OutputMode(Enum):
165
+ classification = "classification"
166
+ regression = "regression"
167
+
168
+
169
+ class MrpcProcessor(DataProcessor):
170
+ """Processor for the MRPC data set (GLUE version)."""
171
+
172
+ def __init__(self, *args, **kwargs):
173
+ super().__init__(*args, **kwargs)
174
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
175
+
176
+ def get_example_from_tensor_dict(self, tensor_dict):
177
+ """See base class."""
178
+ return InputExample(
179
+ tensor_dict["idx"].numpy(),
180
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
181
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
182
+ str(tensor_dict["label"].numpy()),
183
+ )
184
+
185
+ def get_train_examples(self, data_dir):
186
+ """See base class."""
187
+ logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
188
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
189
+
190
+ def get_dev_examples(self, data_dir):
191
+ """See base class."""
192
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
193
+
194
+ def get_test_examples(self, data_dir):
195
+ """See base class."""
196
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
197
+
198
+ def get_labels(self):
199
+ """See base class."""
200
+ return ["0", "1"]
201
+
202
+ def _create_examples(self, lines, set_type):
203
+ """Creates examples for the training, dev and test sets."""
204
+ examples = []
205
+ for i, line in enumerate(lines):
206
+ if i == 0:
207
+ continue
208
+ guid = f"{set_type}-{i}"
209
+ text_a = line[3]
210
+ text_b = line[4]
211
+ label = None if set_type == "test" else line[0]
212
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
213
+ return examples
214
+
215
+
216
+ class MnliProcessor(DataProcessor):
217
+ """Processor for the MultiNLI data set (GLUE version)."""
218
+
219
+ def __init__(self, *args, **kwargs):
220
+ super().__init__(*args, **kwargs)
221
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
222
+
223
+ def get_example_from_tensor_dict(self, tensor_dict):
224
+ """See base class."""
225
+ return InputExample(
226
+ tensor_dict["idx"].numpy(),
227
+ tensor_dict["premise"].numpy().decode("utf-8"),
228
+ tensor_dict["hypothesis"].numpy().decode("utf-8"),
229
+ str(tensor_dict["label"].numpy()),
230
+ )
231
+
232
+ def get_train_examples(self, data_dir):
233
+ """See base class."""
234
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
235
+
236
+ def get_dev_examples(self, data_dir):
237
+ """See base class."""
238
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
239
+
240
+ def get_test_examples(self, data_dir):
241
+ """See base class."""
242
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")
243
+
244
+ def get_labels(self):
245
+ """See base class."""
246
+ return ["contradiction", "entailment", "neutral"]
247
+
248
+ def _create_examples(self, lines, set_type):
249
+ """Creates examples for the training, dev and test sets."""
250
+ examples = []
251
+ for i, line in enumerate(lines):
252
+ if i == 0:
253
+ continue
254
+ guid = f"{set_type}-{line[0]}"
255
+ text_a = line[8]
256
+ text_b = line[9]
257
+ label = None if set_type.startswith("test") else line[-1]
258
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
259
+ return examples
260
+
261
+
262
+ class MnliMismatchedProcessor(MnliProcessor):
263
+ """Processor for the MultiNLI Mismatched data set (GLUE version)."""
264
+
265
+ def __init__(self, *args, **kwargs):
266
+ super().__init__(*args, **kwargs)
267
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
268
+
269
+ def get_dev_examples(self, data_dir):
270
+ """See base class."""
271
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched")
272
+
273
+ def get_test_examples(self, data_dir):
274
+ """See base class."""
275
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched")
276
+
277
+
278
+ class ColaProcessor(DataProcessor):
279
+ """Processor for the CoLA data set (GLUE version)."""
280
+
281
+ def __init__(self, *args, **kwargs):
282
+ super().__init__(*args, **kwargs)
283
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
284
+
285
+ def get_example_from_tensor_dict(self, tensor_dict):
286
+ """See base class."""
287
+ return InputExample(
288
+ tensor_dict["idx"].numpy(),
289
+ tensor_dict["sentence"].numpy().decode("utf-8"),
290
+ None,
291
+ str(tensor_dict["label"].numpy()),
292
+ )
293
+
294
+ def get_train_examples(self, data_dir):
295
+ """See base class."""
296
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
297
+
298
+ def get_dev_examples(self, data_dir):
299
+ """See base class."""
300
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
301
+
302
+ def get_test_examples(self, data_dir):
303
+ """See base class."""
304
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
305
+
306
+ def get_labels(self):
307
+ """See base class."""
308
+ return ["0", "1"]
309
+
310
+ def _create_examples(self, lines, set_type):
311
+ """Creates examples for the training, dev and test sets."""
312
+ test_mode = set_type == "test"
313
+ if test_mode:
314
+ lines = lines[1:]
315
+ text_index = 1 if test_mode else 3
316
+ examples = []
317
+ for i, line in enumerate(lines):
318
+ guid = f"{set_type}-{i}"
319
+ text_a = line[text_index]
320
+ label = None if test_mode else line[1]
321
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
322
+ return examples
323
+
324
+
325
+ class Sst2Processor(DataProcessor):
326
+ """Processor for the SST-2 data set (GLUE version)."""
327
+
328
+ def __init__(self, *args, **kwargs):
329
+ super().__init__(*args, **kwargs)
330
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
331
+
332
+ def get_example_from_tensor_dict(self, tensor_dict):
333
+ """See base class."""
334
+ return InputExample(
335
+ tensor_dict["idx"].numpy(),
336
+ tensor_dict["sentence"].numpy().decode("utf-8"),
337
+ None,
338
+ str(tensor_dict["label"].numpy()),
339
+ )
340
+
341
+ def get_train_examples(self, data_dir):
342
+ """See base class."""
343
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
344
+
345
+ def get_dev_examples(self, data_dir):
346
+ """See base class."""
347
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
348
+
349
+ def get_test_examples(self, data_dir):
350
+ """See base class."""
351
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
352
+
353
+ def get_labels(self):
354
+ """See base class."""
355
+ return ["0", "1"]
356
+
357
+ def _create_examples(self, lines, set_type):
358
+ """Creates examples for the training, dev and test sets."""
359
+ examples = []
360
+ text_index = 1 if set_type == "test" else 0
361
+ for i, line in enumerate(lines):
362
+ if i == 0:
363
+ continue
364
+ guid = f"{set_type}-{i}"
365
+ text_a = line[text_index]
366
+ label = None if set_type == "test" else line[1]
367
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
368
+ return examples
369
+
370
+
371
+ class StsbProcessor(DataProcessor):
372
+ """Processor for the STS-B data set (GLUE version)."""
373
+
374
+ def __init__(self, *args, **kwargs):
375
+ super().__init__(*args, **kwargs)
376
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
377
+
378
+ def get_example_from_tensor_dict(self, tensor_dict):
379
+ """See base class."""
380
+ return InputExample(
381
+ tensor_dict["idx"].numpy(),
382
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
383
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
384
+ str(tensor_dict["label"].numpy()),
385
+ )
386
+
387
+ def get_train_examples(self, data_dir):
388
+ """See base class."""
389
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
390
+
391
+ def get_dev_examples(self, data_dir):
392
+ """See base class."""
393
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
394
+
395
+ def get_test_examples(self, data_dir):
396
+ """See base class."""
397
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
398
+
399
+ def get_labels(self):
400
+ """See base class."""
401
+ return [None]
402
+
403
+ def _create_examples(self, lines, set_type):
404
+ """Creates examples for the training, dev and test sets."""
405
+ examples = []
406
+ for i, line in enumerate(lines):
407
+ if i == 0:
408
+ continue
409
+ guid = f"{set_type}-{line[0]}"
410
+ text_a = line[7]
411
+ text_b = line[8]
412
+ label = None if set_type == "test" else line[-1]
413
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
414
+ return examples
415
+
416
+
417
+ class QqpProcessor(DataProcessor):
418
+ """Processor for the QQP data set (GLUE version)."""
419
+
420
+ def __init__(self, *args, **kwargs):
421
+ super().__init__(*args, **kwargs)
422
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
423
+
424
+ def get_example_from_tensor_dict(self, tensor_dict):
425
+ """See base class."""
426
+ return InputExample(
427
+ tensor_dict["idx"].numpy(),
428
+ tensor_dict["question1"].numpy().decode("utf-8"),
429
+ tensor_dict["question2"].numpy().decode("utf-8"),
430
+ str(tensor_dict["label"].numpy()),
431
+ )
432
+
433
+ def get_train_examples(self, data_dir):
434
+ """See base class."""
435
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
436
+
437
+ def get_dev_examples(self, data_dir):
438
+ """See base class."""
439
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
440
+
441
+ def get_test_examples(self, data_dir):
442
+ """See base class."""
443
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
444
+
445
+ def get_labels(self):
446
+ """See base class."""
447
+ return ["0", "1"]
448
+
449
+ def _create_examples(self, lines, set_type):
450
+ """Creates examples for the training, dev and test sets."""
451
+ test_mode = set_type == "test"
452
+ q1_index = 1 if test_mode else 3
453
+ q2_index = 2 if test_mode else 4
454
+ examples = []
455
+ for i, line in enumerate(lines):
456
+ if i == 0:
457
+ continue
458
+ guid = f"{set_type}-{line[0]}"
459
+ try:
460
+ text_a = line[q1_index]
461
+ text_b = line[q2_index]
462
+ label = None if test_mode else line[5]
463
+ except IndexError:
464
+ continue
465
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
466
+ return examples
467
+
468
+
469
+ class QnliProcessor(DataProcessor):
470
+ """Processor for the QNLI data set (GLUE version)."""
471
+
472
+ def __init__(self, *args, **kwargs):
473
+ super().__init__(*args, **kwargs)
474
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
475
+
476
+ def get_example_from_tensor_dict(self, tensor_dict):
477
+ """See base class."""
478
+ return InputExample(
479
+ tensor_dict["idx"].numpy(),
480
+ tensor_dict["question"].numpy().decode("utf-8"),
481
+ tensor_dict["sentence"].numpy().decode("utf-8"),
482
+ str(tensor_dict["label"].numpy()),
483
+ )
484
+
485
+ def get_train_examples(self, data_dir):
486
+ """See base class."""
487
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
488
+
489
+ def get_dev_examples(self, data_dir):
490
+ """See base class."""
491
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
492
+
493
+ def get_test_examples(self, data_dir):
494
+ """See base class."""
495
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
496
+
497
+ def get_labels(self):
498
+ """See base class."""
499
+ return ["entailment", "not_entailment"]
500
+
501
+ def _create_examples(self, lines, set_type):
502
+ """Creates examples for the training, dev and test sets."""
503
+ examples = []
504
+ for i, line in enumerate(lines):
505
+ if i == 0:
506
+ continue
507
+ guid = f"{set_type}-{line[0]}"
508
+ text_a = line[1]
509
+ text_b = line[2]
510
+ label = None if set_type == "test" else line[-1]
511
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
512
+ return examples
513
+
514
+
515
+ class RteProcessor(DataProcessor):
516
+ """Processor for the RTE data set (GLUE version)."""
517
+
518
+ def __init__(self, *args, **kwargs):
519
+ super().__init__(*args, **kwargs)
520
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
521
+
522
+ def get_example_from_tensor_dict(self, tensor_dict):
523
+ """See base class."""
524
+ return InputExample(
525
+ tensor_dict["idx"].numpy(),
526
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
527
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
528
+ str(tensor_dict["label"].numpy()),
529
+ )
530
+
531
+ def get_train_examples(self, data_dir):
532
+ """See base class."""
533
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
534
+
535
+ def get_dev_examples(self, data_dir):
536
+ """See base class."""
537
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
538
+
539
+ def get_test_examples(self, data_dir):
540
+ """See base class."""
541
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
542
+
543
+ def get_labels(self):
544
+ """See base class."""
545
+ return ["entailment", "not_entailment"]
546
+
547
+ def _create_examples(self, lines, set_type):
548
+ """Creates examples for the training, dev and test sets."""
549
+ examples = []
550
+ for i, line in enumerate(lines):
551
+ if i == 0:
552
+ continue
553
+ guid = f"{set_type}-{line[0]}"
554
+ text_a = line[1]
555
+ text_b = line[2]
556
+ label = None if set_type == "test" else line[-1]
557
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
558
+ return examples
559
+
560
+
561
+ class WnliProcessor(DataProcessor):
562
+ """Processor for the WNLI data set (GLUE version)."""
563
+
564
+ def __init__(self, *args, **kwargs):
565
+ super().__init__(*args, **kwargs)
566
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
567
+
568
+ def get_example_from_tensor_dict(self, tensor_dict):
569
+ """See base class."""
570
+ return InputExample(
571
+ tensor_dict["idx"].numpy(),
572
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
573
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
574
+ str(tensor_dict["label"].numpy()),
575
+ )
576
+
577
+ def get_train_examples(self, data_dir):
578
+ """See base class."""
579
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
580
+
581
+ def get_dev_examples(self, data_dir):
582
+ """See base class."""
583
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
584
+
585
+ def get_test_examples(self, data_dir):
586
+ """See base class."""
587
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
588
+
589
+ def get_labels(self):
590
+ """See base class."""
591
+ return ["0", "1"]
592
+
593
+ def _create_examples(self, lines, set_type):
594
+ """Creates examples for the training, dev and test sets."""
595
+ examples = []
596
+ for i, line in enumerate(lines):
597
+ if i == 0:
598
+ continue
599
+ guid = f"{set_type}-{line[0]}"
600
+ text_a = line[1]
601
+ text_b = line[2]
602
+ label = None if set_type == "test" else line[-1]
603
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
604
+ return examples
605
+
606
+
607
+ glue_tasks_num_labels = {
608
+ "cola": 2,
609
+ "mnli": 3,
610
+ "mrpc": 2,
611
+ "sst-2": 2,
612
+ "sts-b": 1,
613
+ "qqp": 2,
614
+ "qnli": 2,
615
+ "rte": 2,
616
+ "wnli": 2,
617
+ }
618
+
619
+ glue_processors = {
620
+ "cola": ColaProcessor,
621
+ "mnli": MnliProcessor,
622
+ "mnli-mm": MnliMismatchedProcessor,
623
+ "mrpc": MrpcProcessor,
624
+ "sst-2": Sst2Processor,
625
+ "sts-b": StsbProcessor,
626
+ "qqp": QqpProcessor,
627
+ "qnli": QnliProcessor,
628
+ "rte": RteProcessor,
629
+ "wnli": WnliProcessor,
630
+ }
631
+
632
+ glue_output_modes = {
633
+ "cola": "classification",
634
+ "mnli": "classification",
635
+ "mnli-mm": "classification",
636
+ "mrpc": "classification",
637
+ "sst-2": "classification",
638
+ "sts-b": "regression",
639
+ "qqp": "classification",
640
+ "qnli": "classification",
641
+ "rte": "classification",
642
+ "wnli": "classification",
643
+ }
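The processors added above map raw GLUE TSV rows to `InputExample`s, and `glue_convert_examples_to_features` turns those into padded `InputFeatures`. A short sketch of that flow for MRPC, assuming a local data directory (path and model name are illustrative):

```python
# Sketch: run the MRPC processor and feature conversion defined in this file.
from transformers import AutoTokenizer
from transformers.data.processors.glue import (
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = glue_processors["mrpc"]()                   # MrpcProcessor
examples = processor.get_dev_examples("/path/to/MRPC")  # hypothetical dir containing dev.tsv
features = glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=128,
    label_list=processor.get_labels(),
    output_mode=glue_output_modes["mrpc"],              # "classification"
)
print(len(features), features[0].label)
```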
venv/lib/python3.10/site-packages/transformers/data/processors/squad.py ADDED
@@ -0,0 +1,845 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from functools import partial
18
+ from multiprocessing import Pool, cpu_count
19
+
20
+ import numpy as np
21
+ from tqdm import tqdm
22
+
23
+ from ...models.bert.tokenization_bert import whitespace_tokenize
24
+ from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
25
+ from ...utils import is_tf_available, is_torch_available, logging
26
+ from .utils import DataProcessor
27
+
28
+
29
+ # Store the tokenizers which insert 2 separators tokens
30
+ MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
31
+
32
+
33
+ if is_torch_available():
34
+ import torch
35
+ from torch.utils.data import TensorDataset
36
+
37
+ if is_tf_available():
38
+ import tensorflow as tf
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
44
+ """Returns tokenized answer spans that better match the annotated answer."""
45
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
46
+
47
+ for new_start in range(input_start, input_end + 1):
48
+ for new_end in range(input_end, new_start - 1, -1):
49
+ text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
50
+ if text_span == tok_answer_text:
51
+ return (new_start, new_end)
52
+
53
+ return (input_start, input_end)
54
+
55
+
56
+ def _check_is_max_context(doc_spans, cur_span_index, position):
57
+ """Check if this is the 'max context' doc span for the token."""
58
+ best_score = None
59
+ best_span_index = None
60
+ for span_index, doc_span in enumerate(doc_spans):
61
+ end = doc_span.start + doc_span.length - 1
62
+ if position < doc_span.start:
63
+ continue
64
+ if position > end:
65
+ continue
66
+ num_left_context = position - doc_span.start
67
+ num_right_context = end - position
68
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
69
+ if best_score is None or score > best_score:
70
+ best_score = score
71
+ best_span_index = span_index
72
+
73
+ return cur_span_index == best_span_index
74
+
75
+
76
+ def _new_check_is_max_context(doc_spans, cur_span_index, position):
77
+ """Check if this is the 'max context' doc span for the token."""
78
+ # if len(doc_spans) == 1:
79
+ # return True
80
+ best_score = None
81
+ best_span_index = None
82
+ for span_index, doc_span in enumerate(doc_spans):
83
+ end = doc_span["start"] + doc_span["length"] - 1
84
+ if position < doc_span["start"]:
85
+ continue
86
+ if position > end:
87
+ continue
88
+ num_left_context = position - doc_span["start"]
89
+ num_right_context = end - position
90
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
91
+ if best_score is None or score > best_score:
92
+ best_score = score
93
+ best_span_index = span_index
94
+
95
+ return cur_span_index == best_span_index
96
+
97
+
98
+ def _is_whitespace(c):
99
+ if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
100
+ return True
101
+ return False
102
+
103
+
104
+ def squad_convert_example_to_features(
105
+ example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
106
+ ):
107
+ features = []
108
+ if is_training and not example.is_impossible:
109
+ # Get start and end position
110
+ start_position = example.start_position
111
+ end_position = example.end_position
112
+
113
+ # If the answer cannot be found in the text, then skip this example.
114
+ actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
115
+ cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
116
+ if actual_text.find(cleaned_answer_text) == -1:
117
+ logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
118
+ return []
119
+
120
+ tok_to_orig_index = []
121
+ orig_to_tok_index = []
122
+ all_doc_tokens = []
123
+ for i, token in enumerate(example.doc_tokens):
124
+ orig_to_tok_index.append(len(all_doc_tokens))
125
+ if tokenizer.__class__.__name__ in [
126
+ "RobertaTokenizer",
127
+ "LongformerTokenizer",
128
+ "BartTokenizer",
129
+ "RobertaTokenizerFast",
130
+ "LongformerTokenizerFast",
131
+ "BartTokenizerFast",
132
+ ]:
133
+ sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
134
+ else:
135
+ sub_tokens = tokenizer.tokenize(token)
136
+ for sub_token in sub_tokens:
137
+ tok_to_orig_index.append(i)
138
+ all_doc_tokens.append(sub_token)
139
+
140
+ if is_training and not example.is_impossible:
141
+ tok_start_position = orig_to_tok_index[example.start_position]
142
+ if example.end_position < len(example.doc_tokens) - 1:
143
+ tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
144
+ else:
145
+ tok_end_position = len(all_doc_tokens) - 1
146
+
147
+ (tok_start_position, tok_end_position) = _improve_answer_span(
148
+ all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
149
+ )
150
+
151
+ spans = []
152
+
153
+ truncated_query = tokenizer.encode(
154
+ example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
155
+ )
156
+
157
+ # Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling
158
+ # in the way they compute mask of added tokens.
159
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
160
+ sequence_added_tokens = (
161
+ tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
162
+ if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
163
+ else tokenizer.model_max_length - tokenizer.max_len_single_sentence
164
+ )
165
+ sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
166
+
167
+ span_doc_tokens = all_doc_tokens
168
+ while len(spans) * doc_stride < len(all_doc_tokens):
169
+ # Define the side we want to truncate / pad and the text/pair sorting
170
+ if tokenizer.padding_side == "right":
171
+ texts = truncated_query
172
+ pairs = span_doc_tokens
173
+ truncation = TruncationStrategy.ONLY_SECOND.value
174
+ else:
175
+ texts = span_doc_tokens
176
+ pairs = truncated_query
177
+ truncation = TruncationStrategy.ONLY_FIRST.value
178
+
179
+ encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic
180
+ texts,
181
+ pairs,
182
+ truncation=truncation,
183
+ padding=padding_strategy,
184
+ max_length=max_seq_length,
185
+ return_overflowing_tokens=True,
186
+ stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
187
+ return_token_type_ids=True,
188
+ )
189
+
190
+ paragraph_len = min(
191
+ len(all_doc_tokens) - len(spans) * doc_stride,
192
+ max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
193
+ )
194
+
195
+ if tokenizer.pad_token_id in encoded_dict["input_ids"]:
196
+ if tokenizer.padding_side == "right":
197
+ non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
198
+ else:
199
+ last_padding_id_position = (
200
+ len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
201
+ )
202
+ non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
203
+
204
+ else:
205
+ non_padded_ids = encoded_dict["input_ids"]
206
+
207
+ tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
208
+
209
+ token_to_orig_map = {}
210
+ for i in range(paragraph_len):
211
+ index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
212
+ token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
213
+
214
+ encoded_dict["paragraph_len"] = paragraph_len
215
+ encoded_dict["tokens"] = tokens
216
+ encoded_dict["token_to_orig_map"] = token_to_orig_map
217
+ encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
218
+ encoded_dict["token_is_max_context"] = {}
219
+ encoded_dict["start"] = len(spans) * doc_stride
220
+ encoded_dict["length"] = paragraph_len
221
+
222
+ spans.append(encoded_dict)
223
+
224
+ if "overflowing_tokens" not in encoded_dict or (
225
+ "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
226
+ ):
227
+ break
228
+ span_doc_tokens = encoded_dict["overflowing_tokens"]
229
+
230
+ for doc_span_index in range(len(spans)):
231
+ for j in range(spans[doc_span_index]["paragraph_len"]):
232
+ is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
233
+ index = (
234
+ j
235
+ if tokenizer.padding_side == "left"
236
+ else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
237
+ )
238
+ spans[doc_span_index]["token_is_max_context"][index] = is_max_context
239
+
240
+ for span in spans:
241
+ # Identify the position of the CLS token
242
+ cls_index = span["input_ids"].index(tokenizer.cls_token_id)
243
+
244
+ # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
245
+ # Original TF implementation also keep the classification token (set to 0)
246
+ p_mask = np.ones_like(span["token_type_ids"])
247
+ if tokenizer.padding_side == "right":
248
+ p_mask[len(truncated_query) + sequence_added_tokens :] = 0
249
+ else:
250
+ p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0
251
+
252
+ pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
253
+ special_token_indices = np.asarray(
254
+ tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
255
+ ).nonzero()
256
+
257
+ p_mask[pad_token_indices] = 1
258
+ p_mask[special_token_indices] = 1
259
+
260
+ # Set the cls index to 0: the CLS index can be used for impossible answers
261
+ p_mask[cls_index] = 0
262
+
263
+ span_is_impossible = example.is_impossible
264
+ start_position = 0
265
+ end_position = 0
266
+ if is_training and not span_is_impossible:
267
+ # For training, if our document chunk does not contain an annotation
268
+ # we throw it out, since there is nothing to predict.
269
+ doc_start = span["start"]
270
+ doc_end = span["start"] + span["length"] - 1
271
+ out_of_span = False
272
+
273
+ if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
274
+ out_of_span = True
275
+
276
+ if out_of_span:
277
+ start_position = cls_index
278
+ end_position = cls_index
279
+ span_is_impossible = True
280
+ else:
281
+ if tokenizer.padding_side == "left":
282
+ doc_offset = 0
283
+ else:
284
+ doc_offset = len(truncated_query) + sequence_added_tokens
285
+
286
+ start_position = tok_start_position - doc_start + doc_offset
287
+ end_position = tok_end_position - doc_start + doc_offset
288
+
289
+ features.append(
290
+ SquadFeatures(
291
+ span["input_ids"],
292
+ span["attention_mask"],
293
+ span["token_type_ids"],
294
+ cls_index,
295
+ p_mask.tolist(),
296
+ example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing.
297
+ unique_id=0,
298
+ paragraph_len=span["paragraph_len"],
299
+ token_is_max_context=span["token_is_max_context"],
300
+ tokens=span["tokens"],
301
+ token_to_orig_map=span["token_to_orig_map"],
302
+ start_position=start_position,
303
+ end_position=end_position,
304
+ is_impossible=span_is_impossible,
305
+ qas_id=example.qas_id,
306
+ )
307
+ )
308
+ return features
309
+
310
+
311
+ def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
312
+ global tokenizer
313
+ tokenizer = tokenizer_for_convert
314
+
315
+
316
+ def squad_convert_examples_to_features(
317
+ examples,
318
+ tokenizer,
319
+ max_seq_length,
320
+ doc_stride,
321
+ max_query_length,
322
+ is_training,
323
+ padding_strategy="max_length",
324
+ return_dataset=False,
325
+ threads=1,
326
+ tqdm_enabled=True,
327
+ ):
328
+ """
329
+ Converts a list of examples into a list of features that can be directly given as input to a model. It is
330
+ model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.
331
+
332
+ Args:
333
+ examples: list of [`~data.processors.squad.SquadExample`]
334
+ tokenizer: an instance of a child of [`PreTrainedTokenizer`]
335
+ max_seq_length: The maximum sequence length of the inputs.
336
+ doc_stride: The stride used when the context is too large and is split across several features.
337
+ max_query_length: The maximum length of the query.
338
+ is_training: whether to create features for model evaluation or model training.
339
+ padding_strategy: Default to "max_length". Which padding strategy to use
340
+ return_dataset: Default False. Either 'pt' or 'tf'.
341
+ if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset
342
+ threads: multiple processing threads.
343
+
344
+
345
+ Returns:
346
+ list of [`~data.processors.squad.SquadFeatures`]
347
+
348
+ Example:
349
+
350
+ ```python
351
+ processor = SquadV2Processor()
352
+ examples = processor.get_dev_examples(data_dir)
353
+
354
+ features = squad_convert_examples_to_features(
355
+ examples=examples,
356
+ tokenizer=tokenizer,
357
+ max_seq_length=args.max_seq_length,
358
+ doc_stride=args.doc_stride,
359
+ max_query_length=args.max_query_length,
360
+ is_training=not evaluate,
361
+ )
362
+ ```"""
363
+ # Defining helper methods
364
+ features = []
365
+
366
+ threads = min(threads, cpu_count())
367
+ with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
368
+ annotate_ = partial(
369
+ squad_convert_example_to_features,
370
+ max_seq_length=max_seq_length,
371
+ doc_stride=doc_stride,
372
+ max_query_length=max_query_length,
373
+ padding_strategy=padding_strategy,
374
+ is_training=is_training,
375
+ )
376
+ features = list(
377
+ tqdm(
378
+ p.imap(annotate_, examples, chunksize=32),
379
+ total=len(examples),
380
+ desc="convert squad examples to features",
381
+ disable=not tqdm_enabled,
382
+ )
383
+ )
384
+
385
+ new_features = []
386
+ unique_id = 1000000000
387
+ example_index = 0
388
+ for example_features in tqdm(
389
+ features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
390
+ ):
391
+ if not example_features:
392
+ continue
393
+ for example_feature in example_features:
394
+ example_feature.example_index = example_index
395
+ example_feature.unique_id = unique_id
396
+ new_features.append(example_feature)
397
+ unique_id += 1
398
+ example_index += 1
399
+ features = new_features
400
+ del new_features
401
+ if return_dataset == "pt":
402
+ if not is_torch_available():
403
+ raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
404
+
405
+ # Convert to Tensors and build dataset
406
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
407
+ all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
408
+ all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
409
+ all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
410
+ all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
411
+ all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
412
+
413
+ if not is_training:
414
+ all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
415
+ dataset = TensorDataset(
416
+ all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
417
+ )
418
+ else:
419
+ all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
420
+ all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
421
+ dataset = TensorDataset(
422
+ all_input_ids,
423
+ all_attention_masks,
424
+ all_token_type_ids,
425
+ all_start_positions,
426
+ all_end_positions,
427
+ all_cls_index,
428
+ all_p_mask,
429
+ all_is_impossible,
430
+ )
431
+
432
+ return features, dataset
433
+ elif return_dataset == "tf":
434
+ if not is_tf_available():
435
+ raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
436
+
437
+ def gen():
438
+ for i, ex in enumerate(features):
439
+ if ex.token_type_ids is None:
440
+ yield (
441
+ {
442
+ "input_ids": ex.input_ids,
443
+ "attention_mask": ex.attention_mask,
444
+ "feature_index": i,
445
+ "qas_id": ex.qas_id,
446
+ },
447
+ {
448
+ "start_positions": ex.start_position,
449
+ "end_positions": ex.end_position,
450
+ "cls_index": ex.cls_index,
451
+ "p_mask": ex.p_mask,
452
+ "is_impossible": ex.is_impossible,
453
+ },
454
+ )
455
+ else:
456
+ yield (
457
+ {
458
+ "input_ids": ex.input_ids,
459
+ "attention_mask": ex.attention_mask,
460
+ "token_type_ids": ex.token_type_ids,
461
+ "feature_index": i,
462
+ "qas_id": ex.qas_id,
463
+ },
464
+ {
465
+ "start_positions": ex.start_position,
466
+ "end_positions": ex.end_position,
467
+ "cls_index": ex.cls_index,
468
+ "p_mask": ex.p_mask,
469
+ "is_impossible": ex.is_impossible,
470
+ },
471
+ )
472
+
473
+ # Why have we split the batch into a tuple? PyTorch just has a list of tensors.
474
+ if "token_type_ids" in tokenizer.model_input_names:
475
+ train_types = (
476
+ {
477
+ "input_ids": tf.int32,
478
+ "attention_mask": tf.int32,
479
+ "token_type_ids": tf.int32,
480
+ "feature_index": tf.int64,
481
+ "qas_id": tf.string,
482
+ },
483
+ {
484
+ "start_positions": tf.int64,
485
+ "end_positions": tf.int64,
486
+ "cls_index": tf.int64,
487
+ "p_mask": tf.int32,
488
+ "is_impossible": tf.int32,
489
+ },
490
+ )
491
+
492
+ train_shapes = (
493
+ {
494
+ "input_ids": tf.TensorShape([None]),
495
+ "attention_mask": tf.TensorShape([None]),
496
+ "token_type_ids": tf.TensorShape([None]),
497
+ "feature_index": tf.TensorShape([]),
498
+ "qas_id": tf.TensorShape([]),
499
+ },
500
+ {
501
+ "start_positions": tf.TensorShape([]),
502
+ "end_positions": tf.TensorShape([]),
503
+ "cls_index": tf.TensorShape([]),
504
+ "p_mask": tf.TensorShape([None]),
505
+ "is_impossible": tf.TensorShape([]),
506
+ },
507
+ )
508
+ else:
509
+ train_types = (
510
+ {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
511
+ {
512
+ "start_positions": tf.int64,
513
+ "end_positions": tf.int64,
514
+ "cls_index": tf.int64,
515
+ "p_mask": tf.int32,
516
+ "is_impossible": tf.int32,
517
+ },
518
+ )
519
+
520
+ train_shapes = (
521
+ {
522
+ "input_ids": tf.TensorShape([None]),
523
+ "attention_mask": tf.TensorShape([None]),
524
+ "feature_index": tf.TensorShape([]),
525
+ "qas_id": tf.TensorShape([]),
526
+ },
527
+ {
528
+ "start_positions": tf.TensorShape([]),
529
+ "end_positions": tf.TensorShape([]),
530
+ "cls_index": tf.TensorShape([]),
531
+ "p_mask": tf.TensorShape([None]),
532
+ "is_impossible": tf.TensorShape([]),
533
+ },
534
+ )
535
+
536
+ return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
537
+ else:
538
+ return features
539
+
540
+
541
+ class SquadProcessor(DataProcessor):
542
+ """
543
+ Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used for version 1.1 and
544
+ version 2.0 of SQuAD, respectively.
545
+ """
546
+
547
+ train_file = None
548
+ dev_file = None
549
+
550
+ def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
551
+ if not evaluate:
552
+ answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
553
+ answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
554
+ answers = []
555
+ else:
556
+ answers = [
557
+ {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
558
+ for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
559
+ ]
560
+
561
+ answer = None
562
+ answer_start = None
563
+
564
+ return SquadExample(
565
+ qas_id=tensor_dict["id"].numpy().decode("utf-8"),
566
+ question_text=tensor_dict["question"].numpy().decode("utf-8"),
567
+ context_text=tensor_dict["context"].numpy().decode("utf-8"),
568
+ answer_text=answer,
569
+ start_position_character=answer_start,
570
+ title=tensor_dict["title"].numpy().decode("utf-8"),
571
+ answers=answers,
572
+ )
573
+
574
+ def get_examples_from_dataset(self, dataset, evaluate=False):
575
+ """
576
+ Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
577
+
578
+ Args:
579
+ dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
580
+ evaluate: Boolean specifying if in evaluation mode or in training mode
581
+
582
+ Returns:
583
+ List of SquadExample
584
+
585
+ Examples:
586
+
587
+ ```python
588
+ >>> import tensorflow_datasets as tfds
589
+
590
+ >>> dataset = tfds.load("squad")
591
+
592
+ >>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
593
+ >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
594
+ ```"""
595
+
596
+ if evaluate:
597
+ dataset = dataset["validation"]
598
+ else:
599
+ dataset = dataset["train"]
600
+
601
+ examples = []
602
+ for tensor_dict in tqdm(dataset):
603
+ examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
604
+
605
+ return examples
606
+
607
+ def get_train_examples(self, data_dir, filename=None):
608
+ """
609
+ Returns the training examples from the data directory.
610
+
611
+ Args:
612
+ data_dir: Directory containing the data files used for training and evaluating.
613
+ filename: None by default, specify this if the training file has a different name than the original one
614
+ which are `train-v1.1.json` and `train-v2.0.json` for SQuAD versions 1.1 and 2.0 respectively.
615
+
616
+ """
617
+ if data_dir is None:
618
+ data_dir = ""
619
+
620
+ if self.train_file is None:
621
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
622
+
623
+ with open(
624
+ os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
625
+ ) as reader:
626
+ input_data = json.load(reader)["data"]
627
+ return self._create_examples(input_data, "train")
628
+
629
+ def get_dev_examples(self, data_dir, filename=None):
630
+ """
631
+ Returns the evaluation examples from the data directory.
632
+
633
+ Args:
634
+ data_dir: Directory containing the data files used for training and evaluating.
635
+ filename: None by default, specify this if the evaluation file has a different name than the original one
636
+ which are `dev-v1.1.json` and `dev-v2.0.json` for SQuAD versions 1.1 and 2.0 respectively.
637
+ """
638
+ if data_dir is None:
639
+ data_dir = ""
640
+
641
+ if self.dev_file is None:
642
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
643
+
644
+ with open(
645
+ os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
646
+ ) as reader:
647
+ input_data = json.load(reader)["data"]
648
+ return self._create_examples(input_data, "dev")
649
+
650
+ def _create_examples(self, input_data, set_type):
651
+ is_training = set_type == "train"
652
+ examples = []
653
+ for entry in tqdm(input_data):
654
+ title = entry["title"]
655
+ for paragraph in entry["paragraphs"]:
656
+ context_text = paragraph["context"]
657
+ for qa in paragraph["qas"]:
658
+ qas_id = qa["id"]
659
+ question_text = qa["question"]
660
+ start_position_character = None
661
+ answer_text = None
662
+ answers = []
663
+
664
+ is_impossible = qa.get("is_impossible", False)
665
+ if not is_impossible:
666
+ if is_training:
667
+ answer = qa["answers"][0]
668
+ answer_text = answer["text"]
669
+ start_position_character = answer["answer_start"]
670
+ else:
671
+ answers = qa["answers"]
672
+
673
+ example = SquadExample(
674
+ qas_id=qas_id,
675
+ question_text=question_text,
676
+ context_text=context_text,
677
+ answer_text=answer_text,
678
+ start_position_character=start_position_character,
679
+ title=title,
680
+ is_impossible=is_impossible,
681
+ answers=answers,
682
+ )
683
+ examples.append(example)
684
+ return examples
685
+
686
+
687
+ class SquadV1Processor(SquadProcessor):
688
+ train_file = "train-v1.1.json"
689
+ dev_file = "dev-v1.1.json"
690
+
691
+
692
+ class SquadV2Processor(SquadProcessor):
693
+ train_file = "train-v2.0.json"
694
+ dev_file = "dev-v2.0.json"
695
+
696
+
697
+ class SquadExample:
698
+ """
699
+ A single training/test example for the Squad dataset, as loaded from disk.
700
+
701
+ Args:
702
+ qas_id: The example's unique identifier
703
+ question_text: The question string
704
+ context_text: The context string
705
+ answer_text: The answer string
706
+ start_position_character: The character position of the start of the answer
707
+ title: The title of the example
708
+ answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
709
+ is_impossible: False by default, set to True if the example has no possible answer.
710
+ """
711
+
712
+ def __init__(
713
+ self,
714
+ qas_id,
715
+ question_text,
716
+ context_text,
717
+ answer_text,
718
+ start_position_character,
719
+ title,
720
+ answers=[],
721
+ is_impossible=False,
722
+ ):
723
+ self.qas_id = qas_id
724
+ self.question_text = question_text
725
+ self.context_text = context_text
726
+ self.answer_text = answer_text
727
+ self.title = title
728
+ self.is_impossible = is_impossible
729
+ self.answers = answers
730
+
731
+ self.start_position, self.end_position = 0, 0
732
+
733
+ doc_tokens = []
734
+ char_to_word_offset = []
735
+ prev_is_whitespace = True
736
+
737
+ # Split on whitespace so that different tokens may be attributed to their original position.
738
+ for c in self.context_text:
739
+ if _is_whitespace(c):
740
+ prev_is_whitespace = True
741
+ else:
742
+ if prev_is_whitespace:
743
+ doc_tokens.append(c)
744
+ else:
745
+ doc_tokens[-1] += c
746
+ prev_is_whitespace = False
747
+ char_to_word_offset.append(len(doc_tokens) - 1)
748
+
749
+ self.doc_tokens = doc_tokens
750
+ self.char_to_word_offset = char_to_word_offset
751
+
752
+ # Start and end positions only have a value during training, when an answer is provided.
753
+ if start_position_character is not None and not is_impossible:
754
+ self.start_position = char_to_word_offset[start_position_character]
755
+ self.end_position = char_to_word_offset[
756
+ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
757
+ ]
758
+
759
+
760
+ class SquadFeatures:
761
+ """
762
+ Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
763
+ [`~data.processors.squad.SquadExample`] using the
764
+ [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
765
+
766
+ Args:
767
+ input_ids: Indices of input sequence tokens in the vocabulary.
768
+ attention_mask: Mask to avoid performing attention on padding token indices.
769
+ token_type_ids: Segment token indices to indicate first and second portions of the inputs.
770
+ cls_index: the index of the CLS token.
771
+ p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
772
+ Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in the answer.
773
+ example_index: the index of the example
774
+ unique_id: The unique Feature identifier
775
+ paragraph_len: The length of the context
776
+ token_is_max_context:
777
+ List of booleans identifying which tokens have their maximum context in this feature object. If a token
778
+ does not have its maximum context in this feature object, it means that another feature object has more
779
+ information related to that token and should be prioritized over this feature for that token.
780
+ tokens: list of tokens corresponding to the input ids
781
+ token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
782
+ start_position: start of the answer token index
783
+ end_position: end of the answer token index
784
+ encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
785
+ """
786
+
787
+ def __init__(
788
+ self,
789
+ input_ids,
790
+ attention_mask,
791
+ token_type_ids,
792
+ cls_index,
793
+ p_mask,
794
+ example_index,
795
+ unique_id,
796
+ paragraph_len,
797
+ token_is_max_context,
798
+ tokens,
799
+ token_to_orig_map,
800
+ start_position,
801
+ end_position,
802
+ is_impossible,
803
+ qas_id: str = None,
804
+ encoding: BatchEncoding = None,
805
+ ):
806
+ self.input_ids = input_ids
807
+ self.attention_mask = attention_mask
808
+ self.token_type_ids = token_type_ids
809
+ self.cls_index = cls_index
810
+ self.p_mask = p_mask
811
+
812
+ self.example_index = example_index
813
+ self.unique_id = unique_id
814
+ self.paragraph_len = paragraph_len
815
+ self.token_is_max_context = token_is_max_context
816
+ self.tokens = tokens
817
+ self.token_to_orig_map = token_to_orig_map
818
+
819
+ self.start_position = start_position
820
+ self.end_position = end_position
821
+ self.is_impossible = is_impossible
822
+ self.qas_id = qas_id
823
+
824
+ self.encoding = encoding
825
+
826
+
827
+ class SquadResult:
828
+ """
829
+ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
830
+
831
+ Args:
832
+ unique_id: The unique identifier corresponding to that example.
833
+ start_logits: The logits corresponding to the start of the answer
834
+ end_logits: The logits corresponding to the end of the answer
835
+ """
836
+
837
+ def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
838
+ self.start_logits = start_logits
839
+ self.end_logits = end_logits
840
+ self.unique_id = unique_id
841
+
842
+ if start_top_index:
843
+ self.start_top_index = start_top_index
844
+ self.end_top_index = end_top_index
845
+ self.cls_logits = cls_logits
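For orientation, here is a minimal sketch (not part of the diff) of how the pieces added in this file fit together, assuming a hypothetical local directory `./squad_data` containing `dev-v2.0.json` and a standard BERT tokenizer:

```python
from transformers import AutoTokenizer
from transformers.data.processors.squad import SquadV2Processor, squad_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = SquadV2Processor()
examples = processor.get_dev_examples("./squad_data")  # reads dev-v2.0.json by default

# With return_dataset="pt", the call returns both the SquadFeatures list and a
# torch.utils.data.TensorDataset that can be wrapped in a DataLoader.
features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",
)
```

The per-feature `unique_id` assigned during conversion is what ties a model prediction back to its feature through `SquadResult(unique_id, start_logits, end_logits)` at evaluation time.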
venv/lib/python3.10/site-packages/transformers/data/processors/utils.py ADDED
@@ -0,0 +1,349 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import csv
18
+ import dataclasses
19
+ import json
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Union
22
+
23
+ from ...utils import is_tf_available, is_torch_available, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ @dataclass
30
+ class InputExample:
31
+ """
32
+ A single training/test example for simple sequence classification.
33
+
34
+ Args:
35
+ guid: Unique id for the example.
36
+ text_a: string. The untokenized text of the first sequence. For single
37
+ sequence tasks, only this sequence must be specified.
38
+ text_b: (Optional) string. The untokenized text of the second sequence.
39
+ Only must be specified for sequence pair tasks.
40
+ label: (Optional) string. The label of the example. This should be
41
+ specified for train and dev examples, but not for test examples.
42
+ """
43
+
44
+ guid: str
45
+ text_a: str
46
+ text_b: Optional[str] = None
47
+ label: Optional[str] = None
48
+
49
+ def to_json_string(self):
50
+ """Serializes this instance to a JSON string."""
51
+ return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
52
+
53
+
54
+ @dataclass(frozen=True)
55
+ class InputFeatures:
56
+ """
57
+ A single set of features of data. Property names are the same names as the corresponding inputs to a model.
58
+
59
+ Args:
60
+ input_ids: Indices of input sequence tokens in the vocabulary.
61
+ attention_mask: Mask to avoid performing attention on padding token indices.
62
+ Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
63
+ tokens.
64
+ token_type_ids: (Optional) Segment token indices to indicate first and second
65
+ portions of the inputs. Only some models use them.
66
+ label: (Optional) Label corresponding to the input. Int for classification problems,
67
+ float for regression problems.
68
+ """
69
+
70
+ input_ids: List[int]
71
+ attention_mask: Optional[List[int]] = None
72
+ token_type_ids: Optional[List[int]] = None
73
+ label: Optional[Union[int, float]] = None
74
+
75
+ def to_json_string(self):
76
+ """Serializes this instance to a JSON string."""
77
+ return json.dumps(dataclasses.asdict(self)) + "\n"
78
+
79
+
80
+ class DataProcessor:
81
+ """Base class for data converters for sequence classification data sets."""
82
+
83
+ def get_example_from_tensor_dict(self, tensor_dict):
84
+ """
85
+ Gets an example from a dict with tensorflow tensors.
86
+
87
+ Args:
88
+ tensor_dict: Keys and values should match the corresponding Glue
89
+ tensorflow_dataset examples.
90
+ """
91
+ raise NotImplementedError()
92
+
93
+ def get_train_examples(self, data_dir):
94
+ """Gets a collection of [`InputExample`] for the train set."""
95
+ raise NotImplementedError()
96
+
97
+ def get_dev_examples(self, data_dir):
98
+ """Gets a collection of [`InputExample`] for the dev set."""
99
+ raise NotImplementedError()
100
+
101
+ def get_test_examples(self, data_dir):
102
+ """Gets a collection of [`InputExample`] for the test set."""
103
+ raise NotImplementedError()
104
+
105
+ def get_labels(self):
106
+ """Gets the list of labels for this data set."""
107
+ raise NotImplementedError()
108
+
109
+ def tfds_map(self, example):
110
+ """
111
+ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
112
+ examples to the correct format.
113
+ """
114
+ if len(self.get_labels()) > 1:
115
+ example.label = self.get_labels()[int(example.label)]
116
+ return example
117
+
118
+ @classmethod
119
+ def _read_tsv(cls, input_file, quotechar=None):
120
+ """Reads a tab separated value file."""
121
+ with open(input_file, "r", encoding="utf-8-sig") as f:
122
+ return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
123
+
124
+
125
+ class SingleSentenceClassificationProcessor(DataProcessor):
126
+ """Generic processor for a single sentence classification data set."""
127
+
128
+ def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
129
+ self.labels = [] if labels is None else labels
130
+ self.examples = [] if examples is None else examples
131
+ self.mode = mode
132
+ self.verbose = verbose
133
+
134
+ def __len__(self):
135
+ return len(self.examples)
136
+
137
+ def __getitem__(self, idx):
138
+ if isinstance(idx, slice):
139
+ return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
140
+ return self.examples[idx]
141
+
142
+ @classmethod
143
+ def create_from_csv(
144
+ cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
145
+ ):
146
+ processor = cls(**kwargs)
147
+ processor.add_examples_from_csv(
148
+ file_name,
149
+ split_name=split_name,
150
+ column_label=column_label,
151
+ column_text=column_text,
152
+ column_id=column_id,
153
+ skip_first_row=skip_first_row,
154
+ overwrite_labels=True,
155
+ overwrite_examples=True,
156
+ )
157
+ return processor
158
+
159
+ @classmethod
160
+ def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
161
+ processor = cls(**kwargs)
162
+ processor.add_examples(texts_or_text_and_labels, labels=labels)
163
+ return processor
164
+
165
+ def add_examples_from_csv(
166
+ self,
167
+ file_name,
168
+ split_name="",
169
+ column_label=0,
170
+ column_text=1,
171
+ column_id=None,
172
+ skip_first_row=False,
173
+ overwrite_labels=False,
174
+ overwrite_examples=False,
175
+ ):
176
+ lines = self._read_tsv(file_name)
177
+ if skip_first_row:
178
+ lines = lines[1:]
179
+ texts = []
180
+ labels = []
181
+ ids = []
182
+ for i, line in enumerate(lines):
183
+ texts.append(line[column_text])
184
+ labels.append(line[column_label])
185
+ if column_id is not None:
186
+ ids.append(line[column_id])
187
+ else:
188
+ guid = f"{split_name}-{i}" if split_name else str(i)
189
+ ids.append(guid)
190
+
191
+ return self.add_examples(
192
+ texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
193
+ )
194
+
195
+ def add_examples(
196
+ self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
197
+ ):
198
+ if labels is not None and len(texts_or_text_and_labels) != len(labels):
199
+ raise ValueError(
200
+ f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
201
+ )
202
+ if ids is not None and len(texts_or_text_and_labels) != len(ids):
203
+ raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
204
+ if ids is None:
205
+ ids = [None] * len(texts_or_text_and_labels)
206
+ if labels is None:
207
+ labels = [None] * len(texts_or_text_and_labels)
208
+ examples = []
209
+ added_labels = set()
210
+ for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
211
+ if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
212
+ text, label = text_or_text_and_label
213
+ else:
214
+ text = text_or_text_and_label
215
+ added_labels.add(label)
216
+ examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
217
+
218
+ # Update examples
219
+ if overwrite_examples:
220
+ self.examples = examples
221
+ else:
222
+ self.examples.extend(examples)
223
+
224
+ # Update labels
225
+ if overwrite_labels:
226
+ self.labels = list(added_labels)
227
+ else:
228
+ self.labels = list(set(self.labels).union(added_labels))
229
+
230
+ return self.examples
231
+
232
+ def get_features(
233
+ self,
234
+ tokenizer,
235
+ max_length=None,
236
+ pad_on_left=False,
237
+ pad_token=0,
238
+ mask_padding_with_zero=True,
239
+ return_tensors=None,
240
+ ):
241
+ """
242
+ Converts examples into a list of `InputFeatures`.
243
+
244
+ Args:
245
+ tokenizer: Instance of a tokenizer that will tokenize the examples
246
+ max_length: Maximum example length
247
+ pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
248
+ pad_token: Padding token
249
+ mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
250
+ and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
251
+ values)
252
+
253
+ Returns:
254
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
255
+ task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
256
+ `InputFeatures` which can be fed to the model.
257
+
258
+ """
259
+ if max_length is None:
260
+ max_length = tokenizer.max_len
261
+
262
+ label_map = {label: i for i, label in enumerate(self.labels)}
263
+
264
+ all_input_ids = []
265
+ for ex_index, example in enumerate(self.examples):
266
+ if ex_index % 10000 == 0:
267
+ logger.info(f"Tokenizing example {ex_index}")
268
+
269
+ input_ids = tokenizer.encode(
270
+ example.text_a,
271
+ add_special_tokens=True,
272
+ max_length=min(max_length, tokenizer.max_len),
273
+ )
274
+ all_input_ids.append(input_ids)
275
+
276
+ batch_length = max(len(input_ids) for input_ids in all_input_ids)
277
+
278
+ features = []
279
+ for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
280
+ if ex_index % 10000 == 0:
281
+ logger.info(f"Writing example {ex_index}/{len(self.examples)}")
282
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
283
+ # tokens are attended to.
284
+ attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
285
+
286
+ # Zero-pad up to the sequence length.
287
+ padding_length = batch_length - len(input_ids)
288
+ if pad_on_left:
289
+ input_ids = ([pad_token] * padding_length) + input_ids
290
+ attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
291
+ else:
292
+ input_ids = input_ids + ([pad_token] * padding_length)
293
+ attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
294
+
295
+ if len(input_ids) != batch_length:
296
+ raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
297
+ if len(attention_mask) != batch_length:
298
+ raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
299
+
300
+ if self.mode == "classification":
301
+ label = label_map[example.label]
302
+ elif self.mode == "regression":
303
+ label = float(example.label)
304
+ else:
305
+ raise ValueError(self.mode)
306
+
307
+ if ex_index < 5 and self.verbose:
308
+ logger.info("*** Example ***")
309
+ logger.info(f"guid: {example.guid}")
310
+ logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
311
+ logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
312
+ logger.info(f"label: {example.label} (id = {label})")
313
+
314
+ features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
315
+
316
+ if return_tensors is None:
317
+ return features
318
+ elif return_tensors == "tf":
319
+ if not is_tf_available():
320
+ raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
321
+ import tensorflow as tf
322
+
323
+ def gen():
324
+ for ex in features:
325
+ yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
326
+
327
+ dataset = tf.data.Dataset.from_generator(
328
+ gen,
329
+ ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
330
+ ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
331
+ )
332
+ return dataset
333
+ elif return_tensors == "pt":
334
+ if not is_torch_available():
335
+ raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
336
+ import torch
337
+ from torch.utils.data import TensorDataset
338
+
339
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
340
+ all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
341
+ if self.mode == "classification":
342
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
343
+ elif self.mode == "regression":
344
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
345
+
346
+ dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
347
+ return dataset
348
+ else:
349
+ raise ValueError("return_tensors should be one of 'tf' or 'pt'")
venv/lib/python3.10/site-packages/transformers/data/processors/xnli.py ADDED
@@ -0,0 +1,97 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ XNLI utils (dataset loading and evaluation)"""
17
+
18
+
19
+ import os
20
+
21
+ from ...utils import logging
22
+ from .utils import DataProcessor, InputExample
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class XnliProcessor(DataProcessor):
29
+ """
30
+ Processor for the XNLI dataset. Adapted from
31
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
32
+ """
33
+
34
+ def __init__(self, language, train_language=None):
35
+ self.language = language
36
+ self.train_language = train_language
37
+
38
+ def get_train_examples(self, data_dir):
39
+ """See base class."""
40
+ lg = self.language if self.train_language is None else self.train_language
41
+ lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
42
+ examples = []
43
+ for i, line in enumerate(lines):
44
+ if i == 0:
45
+ continue
46
+ guid = f"train-{i}"
47
+ text_a = line[0]
48
+ text_b = line[1]
49
+ label = "contradiction" if line[2] == "contradictory" else line[2]
50
+ if not isinstance(text_a, str):
51
+ raise ValueError(f"Training input {text_a} is not a string")
52
+ if not isinstance(text_b, str):
53
+ raise ValueError(f"Training input {text_b} is not a string")
54
+ if not isinstance(label, str):
55
+ raise ValueError(f"Training label {label} is not a string")
56
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
57
+ return examples
58
+
59
+ def get_test_examples(self, data_dir):
60
+ """See base class."""
61
+ lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
62
+ examples = []
63
+ for i, line in enumerate(lines):
64
+ if i == 0:
65
+ continue
66
+ language = line[0]
67
+ if language != self.language:
68
+ continue
69
+ guid = f"test-{i}"
70
+ text_a = line[6]
71
+ text_b = line[7]
72
+ label = line[1]
73
+ if not isinstance(text_a, str):
74
+ raise ValueError(f"Training input {text_a} is not a string")
75
+ if not isinstance(text_b, str):
76
+ raise ValueError(f"Training input {text_b} is not a string")
77
+ if not isinstance(label, str):
78
+ raise ValueError(f"Training label {label} is not a string")
79
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
80
+ return examples
81
+
82
+ def get_labels(self):
83
+ """See base class."""
84
+ return ["contradiction", "entailment", "neutral"]
85
+
86
+
87
+ xnli_processors = {
88
+ "xnli": XnliProcessor,
89
+ }
90
+
91
+ xnli_output_modes = {
92
+ "xnli": "classification",
93
+ }
94
+
95
+ xnli_tasks_num_labels = {
96
+ "xnli": 3,
97
+ }
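A brief usage sketch for the processor above (not in the diff); `./xnli_data` is a hypothetical directory laid out the way the processor expects (`XNLI-MT-1.0/multinli/multinli.train.<lang>.tsv` for training, `XNLI-1.0/xnli.test.tsv` for testing):

```python
from transformers.data.processors.xnli import XnliProcessor

# Train on the English MultiNLI translations, evaluate on the German XNLI test split.
processor = XnliProcessor(language="de", train_language="en")
print(processor.get_labels())  # ['contradiction', 'entailment', 'neutral']

train_examples = processor.get_train_examples("./xnli_data")
test_examples = processor.get_test_examples("./xnli_data")
```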
venv/lib/python3.10/site-packages/transformers/onnx/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import _LazyModule
18
+
19
+
20
+ _import_structure = {
21
+ "config": [
22
+ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
23
+ "OnnxConfig",
24
+ "OnnxConfigWithPast",
25
+ "OnnxSeq2SeqConfigWithPast",
26
+ "PatchingSpec",
27
+ ],
28
+ "convert": ["export", "validate_model_outputs"],
29
+ "features": ["FeaturesManager"],
30
+ "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
31
+ }
32
+
33
+
34
+ if TYPE_CHECKING:
35
+ from .config import (
36
+ EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
37
+ OnnxConfig,
38
+ OnnxConfigWithPast,
39
+ OnnxSeq2SeqConfigWithPast,
40
+ PatchingSpec,
41
+ )
42
+ from .convert import export, validate_model_outputs
43
+ from .features import FeaturesManager
44
+ from .utils import ParameterFormat, compute_serialized_parameters_size
45
+
46
+ else:
47
+ import sys
48
+
49
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
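Because of the `_LazyModule` indirection above, the public names are only resolved when first accessed. A quick sketch of the resulting import surface:

```python
from transformers.onnx import EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig

print(EXTERNAL_DATA_FORMAT_SIZE_LIMIT)  # 2 * 1024**3, the 2 GB external-data threshold
print(OnnxConfig.default_fixed_batch)   # 2 (see config.py later in this diff)
```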
venv/lib/python3.10/site-packages/transformers/onnx/__main__.py ADDED
@@ -0,0 +1,242 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import subprocess
15
+ import sys
16
+ import warnings
17
+ from argparse import ArgumentParser
18
+ from pathlib import Path
19
+
20
+ from packaging import version
21
+
22
+ from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer
23
+ from ..utils import logging
24
+ from ..utils.import_utils import is_optimum_available
25
+ from .convert import export, validate_model_outputs
26
+ from .features import FeaturesManager
27
+ from .utils import get_preprocessor
28
+
29
+
30
+ MIN_OPTIMUM_VERSION = "1.5.0"
31
+
32
+ ENCODER_DECODER_MODELS = ["vision-encoder-decoder"]
33
+
34
+
35
+ def export_with_optimum(args):
36
+ if is_optimum_available():
37
+ from optimum.version import __version__ as optimum_version
38
+
39
+ parsed_optimum_version = version.parse(optimum_version)
40
+ if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION):
41
+ raise RuntimeError(
42
+ f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You "
43
+ "can upgrade optimum by running: pip install -U optimum[exporters]"
44
+ )
45
+ else:
46
+ raise RuntimeError(
47
+ "transformers.onnx requires optimum to run, you can install the library by running: pip install "
48
+ "optimum[exporters]"
49
+ )
50
+ cmd_line = [
51
+ sys.executable,
52
+ "-m",
53
+ "optimum.exporters.onnx",
54
+ f"--model {args.model}",
55
+ f"--task {args.feature}",
56
+ f"--framework {args.framework}" if args.framework is not None else "",
57
+ f"{args.output}",
58
+ ]
59
+ proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE)
60
+ proc.wait()
61
+
62
+ logger.info(
63
+ "The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as "
64
+ "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: "
65
+ "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model."
66
+ )
67
+
68
+
69
+ def export_with_transformers(args):
70
+ args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx")
71
+ if not args.output.parent.exists():
72
+ args.output.parent.mkdir(parents=True)
73
+
74
+ # Allocate the model
75
+ model = FeaturesManager.get_model_from_feature(
76
+ args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir
77
+ )
78
+
79
+ model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature)
80
+ onnx_config = model_onnx_config(model.config)
81
+
82
+ if model_kind in ENCODER_DECODER_MODELS:
83
+ encoder_model = model.get_encoder()
84
+ decoder_model = model.get_decoder()
85
+
86
+ encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config)
87
+ decoder_onnx_config = onnx_config.get_decoder_config(
88
+ encoder_model.config, decoder_model.config, feature=args.feature
89
+ )
90
+
91
+ if args.opset is None:
92
+ args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)
93
+
94
+ if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset):
95
+ raise ValueError(
96
+ f"Opset {args.opset} is not sufficient to export {model_kind}. At least "
97
+ f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required."
98
+ )
99
+
100
+ preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
101
+
102
+ onnx_inputs, onnx_outputs = export(
103
+ preprocessor,
104
+ encoder_model,
105
+ encoder_onnx_config,
106
+ args.opset,
107
+ args.output.parent.joinpath("encoder_model.onnx"),
108
+ )
109
+
110
+ validate_model_outputs(
111
+ encoder_onnx_config,
112
+ preprocessor,
113
+ encoder_model,
114
+ args.output.parent.joinpath("encoder_model.onnx"),
115
+ onnx_outputs,
116
+ args.atol if args.atol else encoder_onnx_config.atol_for_validation,
117
+ )
118
+
119
+ preprocessor = AutoTokenizer.from_pretrained(args.model)
120
+
121
+ onnx_inputs, onnx_outputs = export(
122
+ preprocessor,
123
+ decoder_model,
124
+ decoder_onnx_config,
125
+ args.opset,
126
+ args.output.parent.joinpath("decoder_model.onnx"),
127
+ )
128
+
129
+ validate_model_outputs(
130
+ decoder_onnx_config,
131
+ preprocessor,
132
+ decoder_model,
133
+ args.output.parent.joinpath("decoder_model.onnx"),
134
+ onnx_outputs,
135
+ args.atol if args.atol else decoder_onnx_config.atol_for_validation,
136
+ )
137
+ logger.info(
138
+ f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()},"
139
+ f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}"
140
+ )
141
+
142
+ else:
143
+ # Instantiate the appropriate preprocessor
144
+ if args.preprocessor == "auto":
145
+ preprocessor = get_preprocessor(args.model)
146
+ elif args.preprocessor == "tokenizer":
147
+ preprocessor = AutoTokenizer.from_pretrained(args.model)
148
+ elif args.preprocessor == "image_processor":
149
+ preprocessor = AutoImageProcessor.from_pretrained(args.model)
150
+ elif args.preprocessor == "feature_extractor":
151
+ preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
152
+ elif args.preprocessor == "processor":
153
+ preprocessor = AutoProcessor.from_pretrained(args.model)
154
+ else:
155
+ raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'")
156
+
157
+ # Ensure the requested opset is sufficient
158
+ if args.opset is None:
159
+ args.opset = onnx_config.default_onnx_opset
160
+
161
+ if args.opset < onnx_config.default_onnx_opset:
162
+ raise ValueError(
163
+ f"Opset {args.opset} is not sufficient to export {model_kind}. "
164
+ f"At least {onnx_config.default_onnx_opset} is required."
165
+ )
166
+
167
+ onnx_inputs, onnx_outputs = export(
168
+ preprocessor,
169
+ model,
170
+ onnx_config,
171
+ args.opset,
172
+ args.output,
173
+ )
174
+
175
+ if args.atol is None:
176
+ args.atol = onnx_config.atol_for_validation
177
+
178
+ validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol)
179
+ logger.info(f"All good, model saved at: {args.output.as_posix()}")
180
+ warnings.warn(
181
+ "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend"
182
+ " using optimum.exporters.onnx in future. You can find more information here:"
183
+ " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.",
184
+ FutureWarning,
185
+ )
186
+
187
+
188
+ def main():
189
+ parser = ArgumentParser("Hugging Face Transformers ONNX exporter")
190
+ parser.add_argument(
191
+ "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from."
192
+ )
193
+ parser.add_argument(
194
+ "--feature",
195
+ default="default",
196
+ help="The type of features to export the model with.",
197
+ )
198
+ parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.")
199
+ parser.add_argument(
200
+ "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model."
201
+ )
202
+ parser.add_argument(
203
+ "--framework",
204
+ type=str,
205
+ choices=["pt", "tf"],
206
+ default=None,
207
+ help=(
208
+ "The framework to use for the ONNX export."
209
+ " If not provided, will attempt to use the local checkpoint's original framework"
210
+ " or what is available in the environment."
211
+ ),
212
+ )
213
+ parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.")
214
+ parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.")
215
+ parser.add_argument(
216
+ "--preprocessor",
217
+ type=str,
218
+ choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"],
219
+ default="auto",
220
+ help="Which type of preprocessor to use. 'auto' tries to automatically detect it.",
221
+ )
222
+ parser.add_argument(
223
+ "--export_with_transformers",
224
+ action="store_true",
225
+ help=(
226
+ "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be "
227
+ "useful when exporting a model supported in transformers but not in optimum, otherwise it is not "
228
+ "recommended."
229
+ ),
230
+ )
231
+
232
+ args = parser.parse_args()
233
+ if args.export_with_transformers or not is_optimum_available():
234
+ export_with_transformers(args)
235
+ else:
236
+ export_with_optimum(args)
237
+
238
+
239
+ if __name__ == "__main__":
240
+ logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name
241
+ logger.setLevel(logging.INFO)
242
+ main()
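Based on the argument parser above, the CLI entry point is typically invoked as `python -m transformers.onnx --model=distilbert-base-uncased exported/`. The sketch below (not part of the diff, and assuming `onnx` is installed and the plain single-model path is taken) mirrors what `export_with_transformers` does programmatically:

```python
from pathlib import Path

from transformers import AutoTokenizer
from transformers.onnx import FeaturesManager, export, validate_model_outputs

model_name = "distilbert-base-uncased"

# Resolve the model and its ONNX config for the "default" feature.
model = FeaturesManager.get_model_from_feature("default", model_name)
_, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature="default")
onnx_config = model_onnx_config(model.config)

tokenizer = AutoTokenizer.from_pretrained(model_name)
output = Path("exported/model.onnx")
output.parent.mkdir(parents=True, exist_ok=True)

# Export, then check the ONNX graph outputs against the reference model.
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, output)
validate_model_outputs(onnx_config, tokenizer, model, output, onnx_outputs, onnx_config.atol_for_validation)
```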
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (875 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (5.88 kB). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc ADDED
Binary file (24.3 kB). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
venv/lib/python3.10/site-packages/transformers/onnx/config.py ADDED
@@ -0,0 +1,741 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import copy
15
+ import dataclasses
16
+ import warnings
17
+ from abc import ABC, abstractmethod
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ from packaging import version
23
+
24
+ from ..utils import TensorType, is_torch_available, is_vision_available, logging
25
+ from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ..configuration_utils import PretrainedConfig
30
+ from ..feature_extraction_utils import FeatureExtractionMixin
31
+ from ..image_processing_utils import ImageProcessingMixin
32
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
33
+
34
+
35
+ if is_vision_available():
36
+ from PIL import Image
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ DEFAULT_ONNX_OPSET = 11
42
+
43
+ # 2 Gb
44
+ EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024
45
+
46
+
47
+ @dataclasses.dataclass
48
+ class PatchingSpec:
49
+ """
50
+ Data class that holds patching specifications.
51
+
52
+ Args:
53
+ o: Module / object where the op to patch is located
54
+ name: Name of the op to monkey patch
55
+ custom_op: Custom op that patches the original op
56
+ orig_op: Original op that is being patched
57
+ op_wrapper: Wrapper (optional) that wraps both the original and custom ops.
58
+ It is useful for ops that are class or static methods for instance.
59
+ """
60
+
61
+ o: Any
62
+ name: str
63
+ custom_op: Callable
64
+ orig_op: Optional[Callable] = None
65
+ op_wrapper: Optional[Callable] = None
66
+
67
+
68
+ class OnnxConfig(ABC):
69
+ """
70
+ Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.
71
+ """
72
+
73
+ default_fixed_batch = 2
74
+ default_fixed_sequence = 8
75
+ default_fixed_num_choices = 4
76
+ torch_onnx_minimum_version = version.parse("1.8")
77
+ _tasks_to_common_outputs = {
78
+ "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
79
+ "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}),
80
+ "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
81
+ "image-segmentation": OrderedDict(
82
+ {
83
+ "logits": {0: "batch", 1: "sequence"},
84
+ "pred_boxes": {0: "batch", 1: "sequence"},
85
+ "pred_masks": {0: "batch", 1: "sequence"},
86
+ }
87
+ ),
88
+ "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
89
+ "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
90
+ "multiple-choice": OrderedDict({"logits": {0: "batch"}}),
91
+ "object-detection": OrderedDict(
92
+ {
93
+ "logits": {0: "batch", 1: "sequence"},
94
+ "pred_boxes": {0: "batch", 1: "sequence"},
95
+ }
96
+ ),
97
+ "question-answering": OrderedDict(
98
+ {
99
+ "start_logits": {0: "batch", 1: "sequence"},
100
+ "end_logits": {0: "batch", 1: "sequence"},
101
+ }
102
+ ),
103
+ "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}),
104
+ "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}),
105
+ "sequence-classification": OrderedDict({"logits": {0: "batch"}}),
106
+ "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
107
+ "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
108
+ "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
109
+ }
110
+
111
+ def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None):
112
+ self._config = config
113
+
114
+ if task not in self._tasks_to_common_outputs:
115
+ raise ValueError(
116
+ f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}"
117
+ )
118
+ self.task = task
119
+
120
+ self._patching_specs = []
121
+ for spec in patching_specs if patching_specs is not None else []:
122
+ final_spec = spec
123
+ if spec.orig_op is None:
124
+ final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name))
125
+ self._patching_specs.append(final_spec)
126
+
127
+ @classmethod
128
+ def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig":
129
+ """
130
+ Instantiate an OnnxConfig for a specific model.
131
+
132
+ Args:
133
+ config: The model's configuration to use when exporting to ONNX
134
+
135
+ Returns:
136
+ OnnxConfig for this model
137
+ """
138
+ return cls(config, task=task)
139
+
140
+ @property
141
+ @abstractmethod
142
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
143
+ """
144
+ Mapping containing the axis definition of the input tensors to provide to the model
145
+
146
+ Returns:
147
+ For each input: its name associated to the axes symbolic name and the axis position within the tensor
148
+ """
149
+ raise NotImplementedError()
150
+
151
+ @property
152
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
153
+ """
154
+ Mapping containing the axis definition of the output tensors to provide to the model
155
+
156
+ Returns:
157
+ For each output: its name associated to the axes symbolic name and the axis position within the tensor
158
+ """
159
+ common_outputs = self._tasks_to_common_outputs[self.task]
160
+ return copy.deepcopy(common_outputs)
161
+
162
+ @property
163
+ def values_override(self) -> Optional[Mapping[str, Any]]:
164
+ """
165
+ Dictionary of keys to override in the model's config before exporting
166
+
167
+ Returns:
168
+ Dictionary with the keys (and their corresponding values) to override
169
+ """
170
+ if hasattr(self._config, "use_cache"):
171
+ return {"use_cache": False}
172
+
173
+ return None
174
+
175
+ @property
176
+ def default_batch_size(self) -> int:
177
+ """
178
+ The default batch size to use if no other indication
179
+
180
+ Returns:
181
+ Integer > 0
182
+ """
183
+ # Using 2 avoids ONNX making assumptions about a single-sample batch
184
+ return OnnxConfig.default_fixed_batch
185
+
186
+ @property
187
+ def default_sequence_length(self) -> int:
188
+ """
189
+ The default sequence length to use if no other indication
190
+
191
+ Returns:
192
+ Integer > 0
193
+ """
194
+ return OnnxConfig.default_fixed_sequence
195
+
196
+ @property
197
+ def default_num_choices(self) -> int:
198
+ """
199
+ The default number of choices to use if no other indication
200
+
201
+ Returns:
202
+ Integer > 0
203
+ """
204
+ return OnnxConfig.default_fixed_num_choices
205
+
206
+ @property
207
+ def default_onnx_opset(self) -> int:
208
+ """
209
+ Which onnx opset to use when exporting the model
210
+
211
+ Returns:
212
+ Integer ONNX Opset version
213
+ """
214
+ return DEFAULT_ONNX_OPSET
215
+
216
+ @property
217
+ def atol_for_validation(self) -> float:
218
+ """
219
+ What absolute tolerance value to use during model conversion validation.
220
+
221
+ Returns:
222
+ Float absolute tolerance value.
223
+ """
224
+ return 1e-5
225
+
226
+ @property
227
+ def is_torch_support_available(self) -> bool:
228
+ """
229
+ Whether the installed PyTorch version meets the minimum required to export the model.
230
+
231
+ Returns:
232
+ `bool`: Whether the installed version of PyTorch is compatible with the model.
233
+ """
234
+ if is_torch_available():
235
+ from transformers.utils import get_torch_version
236
+
237
+ return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version
238
+ else:
239
+ return False
240
+
241
+ @staticmethod
242
+ def use_external_data_format(num_parameters: int) -> bool:
243
+ """
244
+ Flag indicating if the model requires using external data format
245
+
246
+ Args:
247
+ num_parameters: Number of parameters in the model
248
+
249
+ Returns:
250
+ True if model.num_parameters() * size_of(float32) >= 2 GB, False otherwise
251
+ """
252
+
253
+ return (
254
+ compute_serialized_parameters_size(num_parameters, ParameterFormat.Float)
255
+ >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT
256
+ )
257
+
258
+ def _generate_dummy_images(
259
+ self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40
260
+ ):
261
+ images = []
262
+ for _ in range(batch_size):
263
+ data = np.random.rand(image_height, image_width, num_channels) * 255
264
+ images.append(Image.fromarray(data.astype("uint8")).convert("RGB"))
265
+ return images
266
+
267
+ def _generate_dummy_audio(
268
+ self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220
269
+ ):
270
+ audio_data = []
271
+ for _ in range(batch_size):
272
+ # time variable
273
+ t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False)
274
+
275
+ # generate pure sine wave at `frequency` Hz
276
+ audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t))
277
+
278
+ return audio_data
279
+
280
+ def generate_dummy_inputs(
281
+ self,
282
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"],
283
+ batch_size: int = -1,
284
+ seq_length: int = -1,
285
+ num_choices: int = -1,
286
+ is_pair: bool = False,
287
+ framework: Optional[TensorType] = None,
288
+ num_channels: int = 3,
289
+ image_width: int = 40,
290
+ image_height: int = 40,
291
+ sampling_rate: int = 22050,
292
+ time_duration: float = 5.0,
293
+ frequency: int = 220,
294
+ tokenizer: "PreTrainedTokenizerBase" = None,
295
+ ) -> Mapping[str, Any]:
296
+ """
297
+ Generate inputs to provide to the ONNX exporter for the specific framework
298
+
299
+ Args:
300
+ preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
301
+ The preprocessor associated with this model configuration.
302
+ batch_size (`int`, *optional*, defaults to -1):
303
+ The batch size to export the model for (-1 means dynamic axis).
304
+ num_choices (`int`, *optional*, defaults to -1):
305
+ The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
306
+ seq_length (`int`, *optional*, defaults to -1):
307
+ The sequence length to export the model for (-1 means dynamic axis).
308
+ is_pair (`bool`, *optional*, defaults to `False`):
309
+ Indicate if the input is a pair (sentence 1, sentence 2)
310
+ framework (`TensorType`, *optional*, defaults to `None`):
311
+ The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
312
+ num_channels (`int`, *optional*, defaults to 3):
313
+ The number of channels of the generated images.
314
+ image_width (`int`, *optional*, defaults to 40):
315
+ The width of the generated images.
316
+ image_height (`int`, *optional*, defaults to 40):
317
+ The height of the generated images.
318
+ sampling_rate (`int`, *optional*, defaults to 22050):
319
+ The sampling rate for audio data generation.
320
+ time_duration (`float`, *optional*, defaults to 5.0):
321
+ Total seconds of sampling for audio data generation.
322
+ frequency (`int`, *optional*, defaults to 220):
323
+ The desired natural frequency of generated audio.
324
+
325
+ Returns:
326
+ Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
327
+ """
328
+ from ..feature_extraction_utils import FeatureExtractionMixin
329
+ from ..image_processing_utils import ImageProcessingMixin
330
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
331
+
332
+ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
333
+ raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.")
334
+ if tokenizer is not None:
335
+ warnings.warn(
336
+ "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
337
+ " `preprocessor` instead.",
338
+ FutureWarning,
339
+ )
340
+ logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
341
+ preprocessor = tokenizer
342
+ if isinstance(preprocessor, PreTrainedTokenizerBase):
343
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
344
+ batch_size = compute_effective_axis_dimension(
345
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
346
+ )
347
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
348
+ token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
349
+ seq_length = compute_effective_axis_dimension(
350
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
351
+ )
352
+ # Generate dummy inputs according to compute batch and sequence
353
+ input_token = (
354
+ preprocessor.unk_token
355
+ if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0)
356
+ else "0"
357
+ )
358
+ dummy_input = [" ".join([input_token]) * seq_length] * batch_size
359
+ if self.task == "multiple-choice":
360
+ # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations
361
+ # made by ONNX
362
+ num_choices = compute_effective_axis_dimension(
363
+ num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0
364
+ )
365
+ dummy_input = dummy_input * num_choices
366
+ # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length]
367
+ tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
368
+ # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length]
369
+ for k, v in tokenized_input.items():
370
+ tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)]
371
+ return dict(tokenized_input.convert_to_tensors(tensor_type=framework))
372
+ return dict(preprocessor(dummy_input, return_tensors=framework))
373
+ elif isinstance(preprocessor, ImageProcessingMixin):
374
+ if preprocessor.model_input_names[0] != "pixel_values":
375
+ raise ValueError(
376
+ f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects"
377
+ f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}'
378
+ )
379
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
380
+ batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
381
+ dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
382
+ return dict(preprocessor(images=dummy_input, return_tensors=framework))
383
+ elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
384
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
385
+ batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
386
+ dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
387
+ return dict(preprocessor(images=dummy_input, return_tensors=framework))
388
+ elif (
389
+ isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features"
390
+ ):
391
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
392
+ batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
393
+ dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency)
394
+ return dict(preprocessor(dummy_input, return_tensors=framework))
395
+ else:
396
+ raise ValueError(
397
+ "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
398
+ )
399
+
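# Illustrative usage sketch, not part of the library source (the checkpoint name and the
# BertOnnxConfig subclass are only examples of a text model with a registered config):
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     onnx_config = BertOnnxConfig(model.config, task="default")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#
# With batch_size and seq_length left at -1, the resulting tensors have fixed shapes of roughly
# (default_fixed_batch, default_fixed_sequence), i.e. (2, 8), so the exporter cannot fold the
# dynamic axes away.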
400
+ def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
401
+ """
402
+ Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq
403
+ models which have the encoder and decoder exported as separate ONNX files.
404
+
405
+ Args:
406
+ reference_model_inputs (`Mapping[str, Tensor]`):
407
+ Reference inputs for the model.
408
+
409
+ Returns:
410
+ `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
411
+ """
412
+ return reference_model_inputs
413
+
414
+ def patch_ops(self):
415
+ for spec in self._patching_specs:
416
+ custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op)
417
+ setattr(spec.o, spec.name, custom_op)
418
+
419
+ def restore_ops(self):
420
+ for spec in self._patching_specs:
421
+ orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op)
422
+ setattr(spec.o, spec.name, orig_op)
423
+
424
+ @classmethod
425
+ def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:
426
+ """
427
+ Flatten any potential nested structure expanding the name of the field with the index of the element within the
428
+ structure.
429
+
430
+ Args:
431
+ name: The name of the nested structure
432
+ field: The structure to, potentially, be flattened
433
+
434
+ Returns:
435
+ (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure.
436
+
437
+ """
438
+ from itertools import chain
439
+
440
+ return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))}
441
+
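# Illustrative note, not part of the library source: for a nested output such as
# field = [(t0, t1), (t2,)] with name = "hidden_states", the classmethod above returns
# {"hidden_states.0": t0, "hidden_states.1": t1, "hidden_states.2": t2}: the nesting is
# flattened one level and indices follow iteration order.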
442
+
443
+ class OnnxConfigWithPast(OnnxConfig, ABC):
444
+ def __init__(
445
+ self,
446
+ config: "PretrainedConfig",
447
+ task: str = "default",
448
+ patching_specs: List[PatchingSpec] = None,
449
+ use_past: bool = False,
450
+ ):
451
+ super().__init__(config, task=task, patching_specs=patching_specs)
452
+ self.use_past = use_past
453
+
454
+ @classmethod
455
+ def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast":
456
+ """
457
+ Instantiate an OnnxConfig with the `use_past` attribute set to `True`
458
+
459
+ Args:
460
+ config: The underlying model's config to use when exporting to ONNX
461
+
462
+ Returns:
463
+ OnnxConfig with `.use_past = True`
464
+ """
465
+ return cls(config, task=task, use_past=True)
466
+
467
+ @property
468
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
469
+ common_outputs = super().outputs
470
+ if self.use_past:
471
+ self.fill_with_past_key_values_(common_outputs, direction="outputs")
472
+
473
+ return common_outputs
474
+
475
+ @property
476
+ def values_override(self) -> Optional[Mapping[str, Any]]:
477
+ if hasattr(self._config, "use_cache"):
478
+ return {"use_cache": self.use_past}
479
+
480
+ return None
481
+
482
+ @property
483
+ def num_layers(self) -> int:
484
+ """
485
+ The number of layers attribute retrieved from the model config. Override this for model configs where the
486
+ number of layers attribute is not called `num_layers`.
487
+ """
488
+ if not hasattr(self._config, "num_layers"):
489
+ raise AttributeError(
490
+ "could not find the number of layers attribute in the model configuration, override the num_layers"
491
+ " property of the model OnnxConfig to solve this"
492
+ )
493
+ return self._config.num_layers
494
+
495
+ @property
496
+ def num_attention_heads(self) -> int:
497
+ """
498
+ The number of attention heads attribute retrieved from the model config. Override this for model configs where
499
+ the number of attention heads attribute is not called `num_attention_heads`.
500
+ """
501
+ if not hasattr(self._config, "num_attention_heads"):
502
+ raise AttributeError(
503
+ "could not find the number of attention heads attribute in the model configuration, override the"
504
+ " num_attention_heads property of the model OnnxConfig to solve this"
505
+ )
506
+ return self._config.num_attention_heads
507
+
508
+ def generate_dummy_inputs(
509
+ self,
510
+ tokenizer: "PreTrainedTokenizerBase",
511
+ batch_size: int = -1,
512
+ seq_length: int = -1,
513
+ is_pair: bool = False,
514
+ framework: Optional[TensorType] = None,
515
+ ) -> Mapping[str, Any]:
516
+ # TODO: should we set seq_length = 1 when self.use_past = True?
517
+ common_inputs = super().generate_dummy_inputs(
518
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
519
+ )
520
+
521
+ if self.use_past:
522
+ if not is_torch_available():
523
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
524
+ else:
525
+ import torch
526
+
527
+ batch, seqlen = common_inputs["input_ids"].shape
528
+ # Not using the same length for past_key_values
529
+ past_key_values_length = seqlen + 2
530
+ shape = (
531
+ batch,
532
+ self.num_attention_heads,
533
+ past_key_values_length,
534
+ self._config.hidden_size // self.num_attention_heads,
535
+ )
536
+
537
+ if "attention_mask" in common_inputs:
538
+ mask_dtype = common_inputs["attention_mask"].dtype
539
+ common_inputs["attention_mask"] = torch.cat(
540
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
541
+ dim=1,
542
+ )
543
+
544
+ common_inputs["past_key_values"] = []
545
+ for _ in range(self.num_layers):
546
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
547
+
548
+ return common_inputs
549
+
550
+ def fill_with_past_key_values_(
551
+ self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False
552
+ ):
553
+ """
554
+ Fill the input_or_outputs mapping with past_key_values dynamic axes, taking the given direction into account.
555
+
556
+ Args:
557
+ inputs_or_outputs: The mapping to fill.
558
+ direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the
559
+ output mapping, this is important for axes naming.
560
+ inverted_values_shape:
561
+ If `True`, store values on dynamic axis 1, else on axis 2.
562
+
563
+ """
564
+ if direction not in ["inputs", "outputs"]:
565
+ raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
566
+
567
+ name = "past_key_values" if direction == "inputs" else "present"
568
+ for i in range(self.num_layers):
569
+ inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
570
+ if inverted_values_shape:
571
+ inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"}
572
+ else:
573
+ inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
574
+
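# Illustrative note, not part of the library source: with num_layers == 2 and
# direction == "inputs", the call above adds the entries
#
#     past_key_values.0.key   -> {0: "batch", 2: "past_sequence + sequence"}
#     past_key_values.0.value -> {0: "batch", 2: "past_sequence + sequence"}
#     past_key_values.1.key   -> {0: "batch", 2: "past_sequence + sequence"}
#     past_key_values.1.value -> {0: "batch", 2: "past_sequence + sequence"}
#
# (values land on axis 1 instead when inverted_values_shape is True); with
# direction == "outputs" the key prefix becomes "present".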
575
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
576
+ flattened_output[f"{name}.{idx}.key"] = t[0]
577
+ flattened_output[f"{name}.{idx}.value"] = t[1]
578
+
579
+ def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]:
580
+ flattened_output = {}
581
+ if name in ["present", "past_key_values"]:
582
+ for idx, t in enumerate(field):
583
+ self._flatten_past_key_values_(flattened_output, name, idx, t)
584
+ else:
585
+ flattened_output = super().flatten_output_collection_property(name, field)
586
+
587
+ return flattened_output
588
+
589
+
590
+ class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
591
+ @property
592
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
593
+ common_outputs = super(OnnxConfigWithPast, self).outputs
594
+ # Renaming the outputs axes properly.
595
+ for name, axes_names in common_outputs.items():
596
+ sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence"
597
+ for axis_idx, name in axes_names.items():
598
+ if "sequence" in name:
599
+ axes_names[axis_idx] = sequence_name
600
+ # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise
601
+ else:
602
+ axes_names[axis_idx] = name
603
+ if self.use_past:
604
+ self.fill_with_past_key_values_(common_outputs, direction="outputs")
605
+
606
+ return common_outputs
607
+
608
+ @property
609
+ def num_layers(self) -> Tuple[int]:
610
+ try:
611
+ num_layers = super().num_layers
612
+ num_layers = (num_layers, num_layers)
613
+ except AttributeError:
614
+ if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"):
615
+ num_layers = (self._config.encoder_layers, self._config.decoder_layers)
616
+ else:
617
+ raise AttributeError(
618
+ "could not find the number of encoder and decoder layers attributes in the model configuration,"
619
+ " override the num_layers property of the model OnnxConfig to solve this"
620
+ )
621
+
622
+ return num_layers
623
+
624
+ @property
625
+ def num_attention_heads(self) -> Tuple[int]:
626
+ try:
627
+ num_attention_heads = super().num_attention_heads
628
+ num_attention_heads = (num_attention_heads, num_attention_heads)
629
+ except AttributeError:
630
+ if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"):
631
+ num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
632
+ else:
633
+ raise AttributeError(
634
+ "could not find the number of attention heads for the encoder and the decoder attributes in the"
635
+ " model configuration, override the num_attention_heads property of the model OnnxConfig to solve"
636
+ " this"
637
+ )
638
+ return num_attention_heads
639
+
640
+ def generate_dummy_inputs(
641
+ self,
642
+ tokenizer: "PreTrainedTokenizerBase",
643
+ batch_size: int = -1,
644
+ seq_length: int = -1,
645
+ is_pair: bool = False,
646
+ framework: Optional[TensorType] = None,
647
+ ) -> Mapping[str, Any]:
648
+ encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
649
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
650
+ )
651
+
652
+ # Generate decoder inputs
653
+ decoder_seq_length = seq_length if not self.use_past else 1
654
+ decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
655
+ tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework
656
+ )
657
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
658
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
659
+
660
+ if self.use_past:
661
+ if not is_torch_available():
662
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
663
+ else:
664
+ import torch
665
+ batch = common_inputs["input_ids"].shape[0]
666
+ encoder_seq_length = common_inputs["input_ids"].shape[1]
667
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
668
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
669
+ encoder_shape = (
670
+ batch,
671
+ num_encoder_attention_heads,
672
+ encoder_seq_length,
673
+ self._config.hidden_size // num_encoder_attention_heads,
674
+ )
675
+ decoder_shape = (
676
+ batch,
677
+ num_decoder_attention_heads,
678
+ # Not using the same length for past_key_values
679
+ decoder_seq_length + 3,
680
+ self._config.hidden_size // num_decoder_attention_heads,
681
+ )
682
+
683
+ common_inputs["past_key_values"] = []
684
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
685
+ num_encoder_layers, num_decoder_layers = self.num_layers
686
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
687
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
688
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
689
+
690
+ for _ in range(min_num_layers):
691
+ # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the
692
+ # decoder layers, hence a tuple of 4 tensors instead of 2
693
+ common_inputs["past_key_values"].append(
694
+ (
695
+ torch.zeros(decoder_shape),
696
+ torch.zeros(decoder_shape),
697
+ torch.zeros(encoder_shape),
698
+ torch.zeros(encoder_shape),
699
+ )
700
+ )
701
+
702
+ # TODO: test this.
703
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
704
+ for _ in range(min_num_layers, max_num_layers):
705
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
706
+
707
+ return common_inputs
708
+
709
+ def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
710
+ if direction not in ["inputs", "outputs"]:
711
+ raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
712
+
713
+ name = "past_key_values" if direction == "inputs" else "present"
714
+
715
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
716
+ num_encoder_layers, num_decoder_layers = self.num_layers
717
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
718
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
719
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
720
+
721
+ encoder_sequence = "past_encoder_sequence"
722
+ decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"
723
+
724
+ for i in range(min_num_layers):
725
+ inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
726
+ inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
727
+ inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
728
+ inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
729
+
730
+ for i in range(min_num_layers, max_num_layers):
731
+ if remaining_side_name == "encoder":
732
+ axes_info = {0: "batch", 2: encoder_sequence}
733
+ else:
734
+ axes_info = {0: "batch", 2: decoder_sequence}
735
+ inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info
736
+
737
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
738
+ flattened_output[f"{name}.{idx}.decoder.key"] = t[0]
739
+ flattened_output[f"{name}.{idx}.decoder.value"] = t[1]
740
+ flattened_output[f"{name}.{idx}.encoder.key"] = t[2]
741
+ flattened_output[f"{name}.{idx}.encoder.value"] = t[3]
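# ---------------------------------------------------------------------------------------------
# Illustrative sketch, not part of the library source: a minimal OnnxConfig subclass for a
# BERT-like encoder. Only `inputs` has to be implemented; the defaults defined earlier in this
# file cover the opset, the validation tolerance and dummy-input generation. The class name is
# hypothetical and only meant to show the shape of the API.
from collections import OrderedDict
from typing import Mapping


class ToyEncoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes: batch on axis 0, sequence length on axis 1.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
                ("token_type_ids", {0: "batch", 1: "sequence"}),
            ]
        )


# Possible usage (the checkpoint name is an example only):
#     onnx_config = ToyEncoderOnnxConfig(AutoConfig.from_pretrained("bert-base-uncased"))
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# ---------------------------------------------------------------------------------------------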
venv/lib/python3.10/site-packages/transformers/onnx/convert.py ADDED
@@ -0,0 +1,460 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from inspect import signature
17
+ from itertools import chain
18
+ from pathlib import Path
19
+ from typing import TYPE_CHECKING, Iterable, List, Tuple, Union
20
+
21
+ import numpy as np
22
+ from packaging.version import Version, parse
23
+
24
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
25
+ from ..utils import (
26
+ TensorType,
27
+ is_tf_available,
28
+ is_torch_available,
29
+ logging,
30
+ )
31
+ from .config import OnnxConfig
32
+
33
+
34
+ if is_torch_available():
35
+ from ..modeling_utils import PreTrainedModel
36
+
37
+ if is_tf_available():
38
+ from ..modeling_tf_utils import TFPreTrainedModel
39
+
40
+ if TYPE_CHECKING:
41
+ from ..feature_extraction_utils import FeatureExtractionMixin
42
+ from ..processing_utils import ProcessorMixin
43
+ from ..tokenization_utils import PreTrainedTokenizer
44
+
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
+ # This is the minimal required version to support some ONNX Runtime features
50
+ ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")
51
+
52
+
53
+ def check_onnxruntime_requirements(minimum_version: Version):
54
+ """
55
+ Check that onnxruntime is installed and that the installed version is recent enough
56
+
57
+ Raises:
58
+ ImportError: If onnxruntime is not installed or the installed version is too old
59
+ """
60
+ try:
61
+ import onnxruntime
62
+
63
+ # Parse the version of the installed onnxruntime
64
+ ort_version = parse(onnxruntime.__version__)
65
+
66
+ # We require 1.4.0 minimum
67
+ if ort_version < ORT_QUANTIZE_MINIMUM_VERSION:
68
+ raise ImportError(
69
+ f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
70
+ f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
71
+ "Please update onnxruntime by running `pip install --upgrade onnxruntime`"
72
+ )
73
+
74
+ except ImportError:
75
+ raise ImportError(
76
+ "onnxruntime doesn't seem to be currently installed. "
77
+ "Please install the onnxruntime by running `pip install onnxruntime`"
78
+ " and relaunch the conversion."
79
+ )
80
+
81
+
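# Illustrative usage sketch, not part of the library source: callers typically guard the
# optional ONNX Runtime step on the minimum version declared above, e.g.
#
#     try:
#         check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)
#     except ImportError as err:
#         print(f"Skipping the ONNX Runtime step: {err}")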
82
+ def export_pytorch(
83
+ preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
84
+ model: "PreTrainedModel",
85
+ config: OnnxConfig,
86
+ opset: int,
87
+ output: Path,
88
+ tokenizer: "PreTrainedTokenizer" = None,
89
+ device: str = "cpu",
90
+ ) -> Tuple[List[str], List[str]]:
91
+ """
92
+ Export a PyTorch model to an ONNX Intermediate Representation (IR)
93
+
94
+ Args:
95
+ preprocessor ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):
96
+ The preprocessor used for encoding the data.
97
+ model ([`PreTrainedModel`]):
98
+ The model to export.
99
+ config ([`~onnx.config.OnnxConfig`]):
100
+ The ONNX configuration associated with the exported model.
101
+ opset (`int`):
102
+ The version of the ONNX operator set to use.
103
+ output (`Path`):
104
+ Directory to store the exported ONNX model.
105
+ device (`str`, *optional*, defaults to `cpu`):
106
+ The device on which the ONNX model will be exported. Either `cpu` or `cuda`.
107
+
108
+ Returns:
109
+ `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
110
+ the ONNX configuration.
111
+ """
112
+
113
+ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
114
+ raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.")
115
+ if tokenizer is not None:
116
+ warnings.warn(
117
+ "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
118
+ " `preprocessor` instead.",
119
+ FutureWarning,
120
+ )
121
+ logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
122
+ preprocessor = tokenizer
123
+
124
+ if issubclass(type(model), PreTrainedModel):
125
+ import torch
126
+ from torch.onnx import export as onnx_export
127
+
128
+ logger.info(f"Using framework PyTorch: {torch.__version__}")
129
+ with torch.no_grad():
130
+ model.config.return_dict = True
131
+ model.eval()
132
+
133
+ # Check if we need to override certain configuration item
134
+ if config.values_override is not None:
135
+ logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
136
+ for override_config_key, override_config_value in config.values_override.items():
137
+ logger.info(f"\t- {override_config_key} -> {override_config_value}")
138
+ setattr(model.config, override_config_key, override_config_value)
139
+
140
+ # Ensure inputs match
141
+ # TODO: Check when exporting QA we provide "is_pair=True"
142
+ model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)
143
+ device = torch.device(device)
144
+ if device.type == "cuda" and torch.cuda.is_available():
145
+ model.to(device)
146
+ model_inputs_device = {}
147
+ for k, v in model_inputs.items():
148
+ if isinstance(v, Tuple):
149
+ model_inputs_device[k] = tuple(
150
+ x.to(device) if isinstance(x, torch.Tensor) else None for x in v
151
+ )
152
+ elif isinstance(v, List):
153
+ model_inputs_device[k] = [
154
+ tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v
155
+ ]
156
+ else:
157
+ model_inputs_device[k] = v.to(device)
158
+
159
+ model_inputs = model_inputs_device
160
+
161
+ inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
162
+ onnx_outputs = list(config.outputs.keys())
163
+
164
+ if not inputs_match:
165
+ raise ValueError("Model and config inputs doesn't match")
166
+
167
+ config.patch_ops()
168
+
169
+ onnx_export(
170
+ model,
171
+ (model_inputs,),
172
+ f=output.as_posix(),
173
+ input_names=list(config.inputs.keys()),
174
+ output_names=onnx_outputs,
175
+ dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
176
+ do_constant_folding=True,
177
+ opset_version=opset,
178
+ )
179
+
180
+ config.restore_ops()
181
+
182
+ return matched_inputs, onnx_outputs
183
+
184
+
185
+ def export_tensorflow(
186
+ preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"],
187
+ model: "TFPreTrainedModel",
188
+ config: OnnxConfig,
189
+ opset: int,
190
+ output: Path,
191
+ tokenizer: "PreTrainedTokenizer" = None,
192
+ ) -> Tuple[List[str], List[str]]:
193
+ """
194
+ Export a TensorFlow model to an ONNX Intermediate Representation (IR)
195
+
196
+ Args:
197
+ preprocessor ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]):
198
+ The preprocessor used for encoding the data.
199
+ model ([`TFPreTrainedModel`]):
200
+ The model to export.
201
+ config ([`~onnx.config.OnnxConfig`]):
202
+ The ONNX configuration associated with the exported model.
203
+ opset (`int`):
204
+ The version of the ONNX operator set to use.
205
+ output (`Path`):
206
+ Directory to store the exported ONNX model.
207
+
208
+ Returns:
209
+ `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
210
+ the ONNX configuration.
211
+ """
212
+ import onnx
213
+ import tensorflow as tf
214
+ import tf2onnx
215
+
216
+ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
217
+ raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.")
218
+ if tokenizer is not None:
219
+ warnings.warn(
220
+ "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
221
+ " `preprocessor` instead.",
222
+ FutureWarning,
223
+ )
224
+ logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
225
+ preprocessor = tokenizer
226
+
227
+ model.config.return_dict = True
228
+
229
+ # Check if we need to override certain configuration item
230
+ if config.values_override is not None:
231
+ logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
232
+ for override_config_key, override_config_value in config.values_override.items():
233
+ logger.info(f"\t- {override_config_key} -> {override_config_value}")
234
+ setattr(model.config, override_config_key, override_config_value)
235
+
236
+ # Ensure inputs match
237
+ model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW)
238
+ inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
239
+ onnx_outputs = list(config.outputs.keys())
240
+
241
+ input_signature = [
242
+ tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items()
243
+ ]
244
+ onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)
245
+ onnx.save(onnx_model, output.as_posix())
246
+ config.restore_ops()
247
+
248
+ return matched_inputs, onnx_outputs
249
+
250
+
251
+ def export(
252
+ preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
253
+ model: Union["PreTrainedModel", "TFPreTrainedModel"],
254
+ config: OnnxConfig,
255
+ opset: int,
256
+ output: Path,
257
+ tokenizer: "PreTrainedTokenizer" = None,
258
+ device: str = "cpu",
259
+ ) -> Tuple[List[str], List[str]]:
260
+ """
261
+ Export a PyTorch or TensorFlow model to an ONNX Intermediate Representation (IR)
262
+
263
+ Args:
264
+ preprocessor ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):
265
+ The preprocessor used for encoding the data.
266
+ model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
267
+ The model to export.
268
+ config ([`~onnx.config.OnnxConfig`]):
269
+ The ONNX configuration associated with the exported model.
270
+ opset (`int`):
271
+ The version of the ONNX operator set to use.
272
+ output (`Path`):
273
+ Directory to store the exported ONNX model.
274
+ device (`str`, *optional*, defaults to `cpu`):
275
+ The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for
276
+ export on CUDA devices.
277
+
278
+ Returns:
279
+ `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
280
+ the ONNX configuration.
281
+ """
282
+ if not (is_torch_available() or is_tf_available()):
283
+ raise ImportError(
284
+ "Cannot convert because neither PyTorch nor TensorFlow are not installed. "
285
+ "Please install torch or tensorflow first."
286
+ )
287
+
288
+ if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda":
289
+ raise RuntimeError("`tf2onnx` does not support export on CUDA device.")
290
+
291
+ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
292
+ raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.")
293
+ if tokenizer is not None:
294
+ warnings.warn(
295
+ "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
296
+ " `preprocessor` instead.",
297
+ FutureWarning,
298
+ )
299
+ logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
300
+ preprocessor = tokenizer
301
+
302
+ if is_torch_available():
303
+ from ..utils import get_torch_version
304
+
305
+ if not config.is_torch_support_available:
306
+ logger.warning(
307
+ f"Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version},"
308
+ f" got: {get_torch_version()}"
309
+ )
310
+
311
+ if is_torch_available() and issubclass(type(model), PreTrainedModel):
312
+ return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device)
313
+ elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):
314
+ return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)
315
+
316
+
317
+ def validate_model_outputs(
318
+ config: OnnxConfig,
319
+ preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
320
+ reference_model: Union["PreTrainedModel", "TFPreTrainedModel"],
321
+ onnx_model: Path,
322
+ onnx_named_outputs: List[str],
323
+ atol: float,
324
+ tokenizer: "PreTrainedTokenizer" = None,
325
+ ):
326
+ from onnxruntime import InferenceSession, SessionOptions
327
+
328
+ logger.info("Validating ONNX model...")
329
+
330
+ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
331
+ raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.")
332
+ if tokenizer is not None:
333
+ warnings.warn(
334
+ "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
335
+ " `preprocessor` instead.",
336
+ FutureWarning,
337
+ )
338
+ logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
339
+ preprocessor = tokenizer
340
+
341
+ # generate inputs with a different batch_size and seq_len than were used for conversion to properly test
342
+ # dynamic input shapes.
343
+ if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
344
+ reference_model_inputs = config.generate_dummy_inputs(
345
+ preprocessor,
346
+ batch_size=config.default_fixed_batch + 1,
347
+ seq_length=config.default_fixed_sequence + 1,
348
+ framework=TensorType.PYTORCH,
349
+ )
350
+ else:
351
+ reference_model_inputs = config.generate_dummy_inputs(
352
+ preprocessor,
353
+ batch_size=config.default_fixed_batch + 1,
354
+ seq_length=config.default_fixed_sequence + 1,
355
+ framework=TensorType.TENSORFLOW,
356
+ )
357
+
358
+ # Create ONNX Runtime session
359
+ options = SessionOptions()
360
+ session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"])
361
+
362
+ # Compute outputs from the reference model
363
+ if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
364
+ reference_model.to("cpu")
365
+ ref_outputs = reference_model(**reference_model_inputs)
366
+ ref_outputs_dict = {}
367
+
368
+ # We flatten potential collection of outputs (i.e. past_keys) to a flat structure
369
+ for name, value in ref_outputs.items():
370
+ # Overwriting the output name as "present" since it is the name used for the ONNX outputs
371
+ # ("past_key_values" being taken for the ONNX inputs)
372
+ if name == "past_key_values":
373
+ name = "present"
374
+ if isinstance(value, (list, tuple)):
375
+ value = config.flatten_output_collection_property(name, value)
376
+ ref_outputs_dict.update(value)
377
+ else:
378
+ ref_outputs_dict[name] = value
379
+
380
+ # Create onnxruntime inputs from the reference model inputs
381
+ reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs)
382
+
383
+ # We flatten potential collection of inputs (i.e. past_keys)
384
+ onnx_inputs = {}
385
+ for name, value in reference_model_inputs_onnxruntime.items():
386
+ if isinstance(value, (list, tuple)):
387
+ value = config.flatten_output_collection_property(name, value)
388
+ onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()})
389
+ else:
390
+ onnx_inputs[name] = value.numpy()
391
+
392
+ # Compute outputs from the ONNX model
393
+ onnx_outputs = session.run(onnx_named_outputs, onnx_inputs)
394
+
395
+ # Check we have a subset of the keys into onnx_outputs against ref_outputs
396
+ ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs)
397
+ if not onnx_outputs_set.issubset(ref_outputs_set):
398
+ logger.info(
399
+ f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}"
400
+ )
401
+
402
+ raise ValueError(
403
+ "Outputs doesn't match between reference model and ONNX exported model: "
404
+ f"{onnx_outputs_set.difference(ref_outputs_set)}"
405
+ )
406
+ else:
407
+ logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})")
408
+
409
+ # Check the shape and values match
410
+ for name, ort_value in zip(onnx_named_outputs, onnx_outputs):
411
+ if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
412
+ ref_value = ref_outputs_dict[name].detach().numpy()
413
+ else:
414
+ ref_value = ref_outputs_dict[name].numpy()
415
+ logger.info(f'\t- Validating ONNX Model output "{name}":')
416
+
417
+ # Shape
418
+ if not ort_value.shape == ref_value.shape:
419
+ logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}")
420
+ raise ValueError(
421
+ "Outputs shape doesn't match between reference model and ONNX exported model: "
422
+ f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)"
423
+ )
424
+ else:
425
+ logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}")
426
+
427
+ # Values
428
+ if not np.allclose(ref_value, ort_value, atol=atol):
429
+ bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol))
430
+ logger.info(f"\t\t-[x] values not close enough (atol: {atol})")
431
+ raise ValueError(
432
+ "Outputs values doesn't match between reference model and ONNX exported model: "
433
+ f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for "
434
+ f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}"
435
+ )
436
+ else:
437
+ logger.info(f"\t\t-[✓] all values close (atol: {atol})")
438
+
439
+
440
+ def ensure_model_and_config_inputs_match(
441
+ model: Union["PreTrainedModel", "TFPreTrainedModel"], model_inputs: Iterable[str]
442
+ ) -> Tuple[bool, List[str]]:
443
+ """
444
+
445
+ Check that the inputs generated from the ONNX config are accepted by the model's forward (or call) signature, and return them in the order that signature expects.
446
+ """
447
+ if is_torch_available() and issubclass(type(model), PreTrainedModel):
448
+ forward_parameters = signature(model.forward).parameters
449
+ else:
450
+ forward_parameters = signature(model.call).parameters
451
+ model_inputs_set = set(model_inputs)
452
+
453
+ # We are fine if config_inputs has more keys than model_inputs
454
+ forward_inputs_set = set(forward_parameters.keys())
455
+ is_ok = model_inputs_set.issubset(forward_inputs_set)
456
+
457
+ # Make sure the input order match (VERY IMPORTANT !!!!)
458
+ matching_inputs = forward_inputs_set.intersection(model_inputs_set)
459
+ ordered_inputs = [parameter for parameter in forward_parameters.keys() if parameter in matching_inputs]
460
+ return is_ok, ordered_inputs
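# ---------------------------------------------------------------------------------------------
# Illustrative sketch, not part of the library source: a rough end-to-end flow tying the helpers
# above together for a PyTorch checkpoint. The helper name and the checkpoint are hypothetical;
# any model type registered in features.py (below) works the same way.
def _example_export_and_validate(model_name: str = "distilbert-base-uncased") -> None:
    from pathlib import Path

    from transformers import AutoModel, AutoTokenizer
    from transformers.onnx.features import FeaturesManager

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)

    # Resolve the OnnxConfig constructor registered for this model type and feature.
    _, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(model, feature="default")
    onnx_config = onnx_config_ctor(model.config)

    output = Path("model.onnx")
    _, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, output)
    validate_model_outputs(onnx_config, tokenizer, model, output, onnx_outputs, onnx_config.atol_for_validation)
# ---------------------------------------------------------------------------------------------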
venv/lib/python3.10/site-packages/transformers/onnx/features.py ADDED
@@ -0,0 +1,749 @@
1
+ import os
2
+ from functools import partial, reduce
3
+ from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union
4
+
5
+ import transformers
6
+
7
+ from .. import PretrainedConfig, is_tf_available, is_torch_available
8
+ from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging
9
+ from .config import OnnxConfig
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from transformers import PreTrainedModel, TFPreTrainedModel
14
+
15
+
16
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
17
+
18
+ if is_torch_available():
19
+ from transformers.models.auto import (
20
+ AutoModel,
21
+ AutoModelForCausalLM,
22
+ AutoModelForImageClassification,
23
+ AutoModelForImageSegmentation,
24
+ AutoModelForMaskedImageModeling,
25
+ AutoModelForMaskedLM,
26
+ AutoModelForMultipleChoice,
27
+ AutoModelForObjectDetection,
28
+ AutoModelForQuestionAnswering,
29
+ AutoModelForSemanticSegmentation,
30
+ AutoModelForSeq2SeqLM,
31
+ AutoModelForSequenceClassification,
32
+ AutoModelForSpeechSeq2Seq,
33
+ AutoModelForTokenClassification,
34
+ AutoModelForVision2Seq,
35
+ )
36
+ if is_tf_available():
37
+ from transformers.models.auto import (
38
+ TFAutoModel,
39
+ TFAutoModelForCausalLM,
40
+ TFAutoModelForMaskedLM,
41
+ TFAutoModelForMultipleChoice,
42
+ TFAutoModelForQuestionAnswering,
43
+ TFAutoModelForSemanticSegmentation,
44
+ TFAutoModelForSeq2SeqLM,
45
+ TFAutoModelForSequenceClassification,
46
+ TFAutoModelForTokenClassification,
47
+ )
48
+ if not is_torch_available() and not is_tf_available():
49
+ logger.warning(
50
+ "The ONNX export features are only supported for PyTorch or TensorFlow. You will not be able to export models"
51
+ " without one of these libraries installed."
52
+ )
53
+
54
+
55
+ def supported_features_mapping(
56
+ *supported_features: str, onnx_config_cls: str = None
57
+ ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
58
+ """
59
+ Generate the mapping between the supported features and their corresponding OnnxConfig for a given model.
60
+
61
+ Args:
62
+ *supported_features: The names of the supported features.
63
+ onnx_config_cls: The fully qualified name of the OnnxConfig class corresponding to the model.
64
+
65
+ Returns:
66
+ The dictionary mapping a feature to an OnnxConfig constructor.
67
+ """
68
+ if onnx_config_cls is None:
69
+ raise ValueError("A OnnxConfig class must be provided")
70
+
71
+ config_cls = transformers
72
+ for attr_name in onnx_config_cls.split("."):
73
+ config_cls = getattr(config_cls, attr_name)
74
+ mapping = {}
75
+ for feature in supported_features:
76
+ if "-with-past" in feature:
77
+ task = feature.replace("-with-past", "")
78
+ mapping[feature] = partial(config_cls.with_past, task=task)
79
+ else:
80
+ mapping[feature] = partial(config_cls.from_model_config, task=feature)
81
+
82
+ return mapping
83
+
84
+
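# Illustrative note, not part of the library source: for example,
#     supported_features_mapping("default", "default-with-past", onnx_config_cls="models.t5.T5OnnxConfig")
# returns {"default": partial(T5OnnxConfig.from_model_config, task="default"),
#          "default-with-past": partial(T5OnnxConfig.with_past, task="default")},
# i.e. "-with-past" features are routed through the `with_past` constructor defined in config.py.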
85
+ class FeaturesManager:
86
+ _TASKS_TO_AUTOMODELS = {}
87
+ _TASKS_TO_TF_AUTOMODELS = {}
88
+ if is_torch_available():
89
+ _TASKS_TO_AUTOMODELS = {
90
+ "default": AutoModel,
91
+ "masked-lm": AutoModelForMaskedLM,
92
+ "causal-lm": AutoModelForCausalLM,
93
+ "seq2seq-lm": AutoModelForSeq2SeqLM,
94
+ "sequence-classification": AutoModelForSequenceClassification,
95
+ "token-classification": AutoModelForTokenClassification,
96
+ "multiple-choice": AutoModelForMultipleChoice,
97
+ "object-detection": AutoModelForObjectDetection,
98
+ "question-answering": AutoModelForQuestionAnswering,
99
+ "image-classification": AutoModelForImageClassification,
100
+ "image-segmentation": AutoModelForImageSegmentation,
101
+ "masked-im": AutoModelForMaskedImageModeling,
102
+ "semantic-segmentation": AutoModelForSemanticSegmentation,
103
+ "vision2seq-lm": AutoModelForVision2Seq,
104
+ "speech2seq-lm": AutoModelForSpeechSeq2Seq,
105
+ }
106
+ if is_tf_available():
107
+ _TASKS_TO_TF_AUTOMODELS = {
108
+ "default": TFAutoModel,
109
+ "masked-lm": TFAutoModelForMaskedLM,
110
+ "causal-lm": TFAutoModelForCausalLM,
111
+ "seq2seq-lm": TFAutoModelForSeq2SeqLM,
112
+ "sequence-classification": TFAutoModelForSequenceClassification,
113
+ "token-classification": TFAutoModelForTokenClassification,
114
+ "multiple-choice": TFAutoModelForMultipleChoice,
115
+ "question-answering": TFAutoModelForQuestionAnswering,
116
+ "semantic-segmentation": TFAutoModelForSemanticSegmentation,
117
+ }
118
+
119
+ # Set of model topologies we support associated to the features supported by each topology and the factory
120
+ _SUPPORTED_MODEL_TYPE = {
121
+ "albert": supported_features_mapping(
122
+ "default",
123
+ "masked-lm",
124
+ "sequence-classification",
125
+ "multiple-choice",
126
+ "token-classification",
127
+ "question-answering",
128
+ onnx_config_cls="models.albert.AlbertOnnxConfig",
129
+ ),
130
+ "bart": supported_features_mapping(
131
+ "default",
132
+ "default-with-past",
133
+ "causal-lm",
134
+ "causal-lm-with-past",
135
+ "seq2seq-lm",
136
+ "seq2seq-lm-with-past",
137
+ "sequence-classification",
138
+ "question-answering",
139
+ onnx_config_cls="models.bart.BartOnnxConfig",
140
+ ),
141
+ # BEiT cannot be used with the masked image modeling autoclass, so this feature is excluded here
142
+ "beit": supported_features_mapping(
143
+ "default", "image-classification", onnx_config_cls="models.beit.BeitOnnxConfig"
144
+ ),
145
+ "bert": supported_features_mapping(
146
+ "default",
147
+ "masked-lm",
148
+ "causal-lm",
149
+ "sequence-classification",
150
+ "multiple-choice",
151
+ "token-classification",
152
+ "question-answering",
153
+ onnx_config_cls="models.bert.BertOnnxConfig",
154
+ ),
155
+ "big-bird": supported_features_mapping(
156
+ "default",
157
+ "masked-lm",
158
+ "causal-lm",
159
+ "sequence-classification",
160
+ "multiple-choice",
161
+ "token-classification",
162
+ "question-answering",
163
+ onnx_config_cls="models.big_bird.BigBirdOnnxConfig",
164
+ ),
165
+ "bigbird-pegasus": supported_features_mapping(
166
+ "default",
167
+ "default-with-past",
168
+ "causal-lm",
169
+ "causal-lm-with-past",
170
+ "seq2seq-lm",
171
+ "seq2seq-lm-with-past",
172
+ "sequence-classification",
173
+ "question-answering",
174
+ onnx_config_cls="models.bigbird_pegasus.BigBirdPegasusOnnxConfig",
175
+ ),
176
+ "blenderbot": supported_features_mapping(
177
+ "default",
178
+ "default-with-past",
179
+ "causal-lm",
180
+ "causal-lm-with-past",
181
+ "seq2seq-lm",
182
+ "seq2seq-lm-with-past",
183
+ onnx_config_cls="models.blenderbot.BlenderbotOnnxConfig",
184
+ ),
185
+ "blenderbot-small": supported_features_mapping(
186
+ "default",
187
+ "default-with-past",
188
+ "causal-lm",
189
+ "causal-lm-with-past",
190
+ "seq2seq-lm",
191
+ "seq2seq-lm-with-past",
192
+ onnx_config_cls="models.blenderbot_small.BlenderbotSmallOnnxConfig",
193
+ ),
194
+ "bloom": supported_features_mapping(
195
+ "default",
196
+ "default-with-past",
197
+ "causal-lm",
198
+ "causal-lm-with-past",
199
+ "sequence-classification",
200
+ "token-classification",
201
+ onnx_config_cls="models.bloom.BloomOnnxConfig",
202
+ ),
203
+ "camembert": supported_features_mapping(
204
+ "default",
205
+ "masked-lm",
206
+ "causal-lm",
207
+ "sequence-classification",
208
+ "multiple-choice",
209
+ "token-classification",
210
+ "question-answering",
211
+ onnx_config_cls="models.camembert.CamembertOnnxConfig",
212
+ ),
213
+ "clip": supported_features_mapping(
214
+ "default",
215
+ onnx_config_cls="models.clip.CLIPOnnxConfig",
216
+ ),
217
+ "codegen": supported_features_mapping(
218
+ "default",
219
+ "causal-lm",
220
+ onnx_config_cls="models.codegen.CodeGenOnnxConfig",
221
+ ),
222
+ "convbert": supported_features_mapping(
223
+ "default",
224
+ "masked-lm",
225
+ "sequence-classification",
226
+ "multiple-choice",
227
+ "token-classification",
228
+ "question-answering",
229
+ onnx_config_cls="models.convbert.ConvBertOnnxConfig",
230
+ ),
231
+ "convnext": supported_features_mapping(
232
+ "default",
233
+ "image-classification",
234
+ onnx_config_cls="models.convnext.ConvNextOnnxConfig",
235
+ ),
236
+ "data2vec-text": supported_features_mapping(
237
+ "default",
238
+ "masked-lm",
239
+ "sequence-classification",
240
+ "multiple-choice",
241
+ "token-classification",
242
+ "question-answering",
243
+ onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig",
244
+ ),
245
+ "data2vec-vision": supported_features_mapping(
246
+ "default",
247
+ "image-classification",
248
+ # ONNX doesn't support `adaptive_avg_pool2d` yet
249
+ # "semantic-segmentation",
250
+ onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig",
251
+ ),
252
+ "deberta": supported_features_mapping(
253
+ "default",
254
+ "masked-lm",
255
+ "sequence-classification",
256
+ "token-classification",
257
+ "question-answering",
258
+ onnx_config_cls="models.deberta.DebertaOnnxConfig",
259
+ ),
260
+ "deberta-v2": supported_features_mapping(
261
+ "default",
262
+ "masked-lm",
263
+ "sequence-classification",
264
+ "multiple-choice",
265
+ "token-classification",
266
+ "question-answering",
267
+ onnx_config_cls="models.deberta_v2.DebertaV2OnnxConfig",
268
+ ),
269
+ "deit": supported_features_mapping(
270
+ "default", "image-classification", onnx_config_cls="models.deit.DeiTOnnxConfig"
271
+ ),
272
+ "detr": supported_features_mapping(
273
+ "default",
274
+ "object-detection",
275
+ "image-segmentation",
276
+ onnx_config_cls="models.detr.DetrOnnxConfig",
277
+ ),
278
+ "distilbert": supported_features_mapping(
279
+ "default",
280
+ "masked-lm",
281
+ "sequence-classification",
282
+ "multiple-choice",
283
+ "token-classification",
284
+ "question-answering",
285
+ onnx_config_cls="models.distilbert.DistilBertOnnxConfig",
286
+ ),
287
+ "electra": supported_features_mapping(
288
+ "default",
289
+ "masked-lm",
290
+ "causal-lm",
291
+ "sequence-classification",
292
+ "multiple-choice",
293
+ "token-classification",
294
+ "question-answering",
295
+ onnx_config_cls="models.electra.ElectraOnnxConfig",
296
+ ),
297
+ "flaubert": supported_features_mapping(
298
+ "default",
299
+ "masked-lm",
300
+ "causal-lm",
301
+ "sequence-classification",
302
+ "multiple-choice",
303
+ "token-classification",
304
+ "question-answering",
305
+ onnx_config_cls="models.flaubert.FlaubertOnnxConfig",
306
+ ),
307
+ "gpt2": supported_features_mapping(
308
+ "default",
309
+ "default-with-past",
310
+ "causal-lm",
311
+ "causal-lm-with-past",
312
+ "sequence-classification",
313
+ "token-classification",
314
+ onnx_config_cls="models.gpt2.GPT2OnnxConfig",
315
+ ),
316
+ "gptj": supported_features_mapping(
317
+ "default",
318
+ "default-with-past",
319
+ "causal-lm",
320
+ "causal-lm-with-past",
321
+ "question-answering",
322
+ "sequence-classification",
323
+ onnx_config_cls="models.gptj.GPTJOnnxConfig",
324
+ ),
325
+ "gpt-neo": supported_features_mapping(
326
+ "default",
327
+ "default-with-past",
328
+ "causal-lm",
329
+ "causal-lm-with-past",
330
+ "sequence-classification",
331
+ onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig",
332
+ ),
333
+ "groupvit": supported_features_mapping(
334
+ "default",
335
+ onnx_config_cls="models.groupvit.GroupViTOnnxConfig",
336
+ ),
337
+ "ibert": supported_features_mapping(
338
+ "default",
339
+ "masked-lm",
340
+ "sequence-classification",
341
+ "multiple-choice",
342
+ "token-classification",
343
+ "question-answering",
344
+ onnx_config_cls="models.ibert.IBertOnnxConfig",
345
+ ),
346
+ "imagegpt": supported_features_mapping(
347
+ "default", "image-classification", onnx_config_cls="models.imagegpt.ImageGPTOnnxConfig"
348
+ ),
349
+ "layoutlm": supported_features_mapping(
350
+ "default",
351
+ "masked-lm",
352
+ "sequence-classification",
353
+ "token-classification",
354
+ onnx_config_cls="models.layoutlm.LayoutLMOnnxConfig",
355
+ ),
356
+ "layoutlmv3": supported_features_mapping(
357
+ "default",
358
+ "question-answering",
359
+ "sequence-classification",
360
+ "token-classification",
361
+ onnx_config_cls="models.layoutlmv3.LayoutLMv3OnnxConfig",
362
+ ),
363
+ "levit": supported_features_mapping(
364
+ "default", "image-classification", onnx_config_cls="models.levit.LevitOnnxConfig"
365
+ ),
366
+ "longt5": supported_features_mapping(
367
+ "default",
368
+ "default-with-past",
369
+ "seq2seq-lm",
370
+ "seq2seq-lm-with-past",
371
+ onnx_config_cls="models.longt5.LongT5OnnxConfig",
372
+ ),
373
+ "longformer": supported_features_mapping(
374
+ "default",
375
+ "masked-lm",
376
+ "multiple-choice",
377
+ "question-answering",
378
+ "sequence-classification",
379
+ "token-classification",
380
+ onnx_config_cls="models.longformer.LongformerOnnxConfig",
381
+ ),
382
+ "marian": supported_features_mapping(
383
+ "default",
384
+ "default-with-past",
385
+ "seq2seq-lm",
386
+ "seq2seq-lm-with-past",
387
+ "causal-lm",
388
+ "causal-lm-with-past",
389
+ onnx_config_cls="models.marian.MarianOnnxConfig",
390
+ ),
391
+ "mbart": supported_features_mapping(
392
+ "default",
393
+ "default-with-past",
394
+ "causal-lm",
395
+ "causal-lm-with-past",
396
+ "seq2seq-lm",
397
+ "seq2seq-lm-with-past",
398
+ "sequence-classification",
399
+ "question-answering",
400
+ onnx_config_cls="models.mbart.MBartOnnxConfig",
401
+ ),
402
+ "mobilebert": supported_features_mapping(
403
+ "default",
404
+ "masked-lm",
405
+ "sequence-classification",
406
+ "multiple-choice",
407
+ "token-classification",
408
+ "question-answering",
409
+ onnx_config_cls="models.mobilebert.MobileBertOnnxConfig",
410
+ ),
411
+ "mobilenet-v1": supported_features_mapping(
412
+ "default",
413
+ "image-classification",
414
+ onnx_config_cls="models.mobilenet_v1.MobileNetV1OnnxConfig",
415
+ ),
416
+ "mobilenet-v2": supported_features_mapping(
417
+ "default",
418
+ "image-classification",
419
+ onnx_config_cls="models.mobilenet_v2.MobileNetV2OnnxConfig",
420
+ ),
421
+ "mobilevit": supported_features_mapping(
422
+ "default",
423
+ "image-classification",
424
+ onnx_config_cls="models.mobilevit.MobileViTOnnxConfig",
425
+ ),
426
+ "mt5": supported_features_mapping(
427
+ "default",
428
+ "default-with-past",
429
+ "seq2seq-lm",
430
+ "seq2seq-lm-with-past",
431
+ onnx_config_cls="models.mt5.MT5OnnxConfig",
432
+ ),
433
+ "m2m-100": supported_features_mapping(
434
+ "default",
435
+ "default-with-past",
436
+ "seq2seq-lm",
437
+ "seq2seq-lm-with-past",
438
+ onnx_config_cls="models.m2m_100.M2M100OnnxConfig",
439
+ ),
440
+ "owlvit": supported_features_mapping(
441
+ "default",
442
+ onnx_config_cls="models.owlvit.OwlViTOnnxConfig",
443
+ ),
444
+ "perceiver": supported_features_mapping(
445
+ "image-classification",
446
+ "masked-lm",
447
+ "sequence-classification",
448
+ onnx_config_cls="models.perceiver.PerceiverOnnxConfig",
449
+ ),
450
+ "poolformer": supported_features_mapping(
451
+ "default", "image-classification", onnx_config_cls="models.poolformer.PoolFormerOnnxConfig"
452
+ ),
453
+ "rembert": supported_features_mapping(
454
+ "default",
455
+ "masked-lm",
456
+ "causal-lm",
457
+ "sequence-classification",
458
+ "multiple-choice",
459
+ "token-classification",
460
+ "question-answering",
461
+ onnx_config_cls="models.rembert.RemBertOnnxConfig",
462
+ ),
463
+ "resnet": supported_features_mapping(
464
+ "default",
465
+ "image-classification",
466
+ onnx_config_cls="models.resnet.ResNetOnnxConfig",
467
+ ),
468
+ "roberta": supported_features_mapping(
469
+ "default",
470
+ "masked-lm",
471
+ "causal-lm",
472
+ "sequence-classification",
473
+ "multiple-choice",
474
+ "token-classification",
475
+ "question-answering",
476
+ onnx_config_cls="models.roberta.RobertaOnnxConfig",
477
+ ),
478
+ "roformer": supported_features_mapping(
479
+ "default",
480
+ "masked-lm",
481
+ "causal-lm",
482
+ "sequence-classification",
483
+ "token-classification",
484
+ "multiple-choice",
485
+ "question-answering",
487
+ onnx_config_cls="models.roformer.RoFormerOnnxConfig",
488
+ ),
489
+ "segformer": supported_features_mapping(
490
+ "default",
491
+ "image-classification",
492
+ "semantic-segmentation",
493
+ onnx_config_cls="models.segformer.SegformerOnnxConfig",
494
+ ),
495
+ "squeezebert": supported_features_mapping(
496
+ "default",
497
+ "masked-lm",
498
+ "sequence-classification",
499
+ "multiple-choice",
500
+ "token-classification",
501
+ "question-answering",
502
+ onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig",
503
+ ),
504
+ "swin": supported_features_mapping(
505
+ "default", "image-classification", onnx_config_cls="models.swin.SwinOnnxConfig"
506
+ ),
507
+ "t5": supported_features_mapping(
508
+ "default",
509
+ "default-with-past",
510
+ "seq2seq-lm",
511
+ "seq2seq-lm-with-past",
512
+ onnx_config_cls="models.t5.T5OnnxConfig",
513
+ ),
514
+ "vision-encoder-decoder": supported_features_mapping(
515
+ "vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig"
516
+ ),
517
+ "vit": supported_features_mapping(
518
+ "default", "image-classification", onnx_config_cls="models.vit.ViTOnnxConfig"
519
+ ),
520
+ "whisper": supported_features_mapping(
521
+ "default",
522
+ "default-with-past",
523
+ "speech2seq-lm",
524
+ "speech2seq-lm-with-past",
525
+ onnx_config_cls="models.whisper.WhisperOnnxConfig",
526
+ ),
527
+ "xlm": supported_features_mapping(
528
+ "default",
529
+ "masked-lm",
530
+ "causal-lm",
531
+ "sequence-classification",
532
+ "multiple-choice",
533
+ "token-classification",
534
+ "question-answering",
535
+ onnx_config_cls="models.xlm.XLMOnnxConfig",
536
+ ),
537
+ "xlm-roberta": supported_features_mapping(
538
+ "default",
539
+ "masked-lm",
540
+ "causal-lm",
541
+ "sequence-classification",
542
+ "multiple-choice",
543
+ "token-classification",
544
+ "question-answering",
545
+ onnx_config_cls="models.xlm_roberta.XLMRobertaOnnxConfig",
546
+ ),
547
+ "yolos": supported_features_mapping(
548
+ "default",
549
+ "object-detection",
550
+ onnx_config_cls="models.yolos.YolosOnnxConfig",
551
+ ),
552
+ }
553
+
554
+ AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values())))
555
+
556
+ @staticmethod
557
+ def get_supported_features_for_model_type(
558
+ model_type: str, model_name: Optional[str] = None
559
+ ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
560
+ """
561
+ Tries to retrieve the feature -> OnnxConfig constructor map from the model type.
562
+
563
+ Args:
564
+ model_type (`str`):
565
+ The model type to retrieve the supported features for.
566
+ model_name (`str`, *optional*):
567
+ The name attribute of the model object, only used for the exception message.
568
+
569
+ Returns:
570
+ The dictionary mapping each feature to a corresponding OnnxConfig constructor.
571
+ """
572
+ model_type = model_type.lower()
573
+ if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE:
574
+ model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type
575
+ raise KeyError(
576
+ f"{model_type_and_model_name} is not supported yet. "
577
+ f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. "
578
+ f"If you want to support {model_type} please propose a PR or open up an issue."
579
+ )
580
+ return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]
581
+
582
+ @staticmethod
583
+ def feature_to_task(feature: str) -> str:
584
+ return feature.replace("-with-past", "")
585
+
586
+ @staticmethod
587
+ def _validate_framework_choice(framework: str):
588
+ """
589
+ Validates if the framework requested for the export is both correct and available, otherwise throws an
590
+ exception.
591
+ """
592
+ if framework not in ["pt", "tf"]:
593
+ raise ValueError(
594
+ f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided."
595
+ )
596
+ elif framework == "pt" and not is_torch_available():
597
+ raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.")
598
+ elif framework == "tf" and not is_tf_available():
599
+ raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.")
600
+
601
+ @staticmethod
602
+ def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type:
603
+ """
604
+ Attempts to retrieve an AutoModel class from a feature name.
605
+
606
+ Args:
607
+ feature (`str`):
608
+ The feature required.
609
+ framework (`str`, *optional*, defaults to `"pt"`):
610
+ The framework to use for the export.
611
+
612
+ Returns:
613
+ The AutoModel class corresponding to the feature.
614
+ """
615
+ task = FeaturesManager.feature_to_task(feature)
616
+ FeaturesManager._validate_framework_choice(framework)
617
+ if framework == "pt":
618
+ task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS
619
+ else:
620
+ task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS
621
+ if task not in task_to_automodel:
622
+ raise KeyError(
623
+ f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}"
624
+ )
625
+
626
+ return task_to_automodel[task]
627
+
628
+ @staticmethod
629
+ def determine_framework(model: str, framework: str = None) -> str:
630
+ """
631
+ Determines the framework to use for the export.
632
+
633
+ The priority is in the following order:
634
+ 1. User input via `framework`.
635
+ 2. If local checkpoint is provided, use the same framework as the checkpoint.
636
+ 3. Available framework in environment, with priority given to PyTorch
637
+
638
+ Args:
639
+ model (`str`):
640
+ The name of the model to export.
641
+ framework (`str`, *optional*, defaults to `None`):
642
+ The framework to use for the export. See above for priority if none provided.
643
+
644
+ Returns:
645
+ The framework to use for the export.
646
+
647
+ """
648
+ if framework is not None:
649
+ return framework
650
+
651
+ framework_map = {"pt": "PyTorch", "tf": "TensorFlow"}
652
+ exporter_map = {"pt": "torch", "tf": "tf2onnx"}
653
+
654
+ if os.path.isdir(model):
655
+ if os.path.isfile(os.path.join(model, WEIGHTS_NAME)):
656
+ framework = "pt"
657
+ elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)):
658
+ framework = "tf"
659
+ else:
660
+ raise FileNotFoundError(
661
+ "Cannot determine framework from given checkpoint location."
662
+ f" There should be a {WEIGHTS_NAME} for PyTorch"
663
+ f" or {TF2_WEIGHTS_NAME} for TensorFlow."
664
+ )
665
+ logger.info(f"Local {framework_map[framework]} model found.")
666
+ else:
667
+ if is_torch_available():
668
+ framework = "pt"
669
+ elif is_tf_available():
670
+ framework = "tf"
671
+ else:
672
+ raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.")
673
+
674
+ logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.")
675
+
676
+ return framework
677
+
678
+ @staticmethod
679
+ def get_model_from_feature(
680
+ feature: str, model: str, framework: str = None, cache_dir: str = None
681
+ ) -> Union["PreTrainedModel", "TFPreTrainedModel"]:
682
+ """
683
+ Attempts to retrieve a model from a model's name and the feature to be enabled.
684
+
685
+ Args:
686
+ feature (`str`):
687
+ The feature required.
688
+ model (`str`):
689
+ The name of the model to export.
690
+ framework (`str`, *optional*, defaults to `None`):
691
+ The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should
692
+ none be provided.
693
+
694
+ Returns:
695
+ The instance of the model.
696
+
697
+ """
698
+ framework = FeaturesManager.determine_framework(model, framework)
699
+ model_class = FeaturesManager.get_model_class_for_feature(feature, framework)
700
+ try:
701
+ model = model_class.from_pretrained(model, cache_dir=cache_dir)
702
+ except OSError:
703
+ if framework == "pt":
704
+ logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.")
705
+ model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir)
706
+ else:
707
+ logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.")
708
+ model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir)
709
+ return model
710
+
711
+ @staticmethod
712
+ def check_supported_model_or_raise(
713
+ model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default"
714
+ ) -> Tuple[str, Callable]:
715
+ """
716
+ Check whether or not the model has the requested features.
717
+
718
+ Args:
719
+ model: The model to export.
720
+ feature: The name of the feature to check if it is available.
721
+
722
+ Returns:
723
+ A tuple made of (str) the type of the model and (OnnxConfig) the OnnxConfig instance holding the model export properties.
724
+
725
+ """
726
+ model_type = model.config.model_type.replace("_", "-")
727
+ model_name = getattr(model, "name", "")
728
+ model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name)
729
+ if feature not in model_features:
730
+ raise ValueError(
731
+ f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}"
732
+ )
733
+
734
+ return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
735
+
736
+ def get_config(model_type: str, feature: str) -> OnnxConfig:
737
+ """
738
+ Gets the OnnxConfig for a model_type and feature combination.
739
+
740
+ Args:
741
+ model_type (`str`):
742
+ The model type to retrieve the config for.
743
+ feature (`str`):
744
+ The feature to retrieve the config for.
745
+
746
+ Returns:
747
+ `OnnxConfig`: config for the combination
748
+ """
749
+ return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
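For orientation, here is a minimal usage sketch of the `FeaturesManager` API defined above. It is not part of the diff: the checkpoint name is only an example, and a PyTorch environment is assumed.

# Hedged sketch: resolve a model and its ONNX export config for a feature.
from transformers.onnx import FeaturesManager

model = FeaturesManager.get_model_from_feature("sequence-classification", "distilbert-base-uncased")
model_type, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(
    model, feature="sequence-classification"
)
onnx_config = onnx_config_ctor(model.config)  # constructor returned by supported_features_mapping
print(model_type, onnx_config.default_onnx_opset)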
venv/lib/python3.10/site-packages/transformers/onnx/utils.py ADDED
@@ -0,0 +1,109 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from ctypes import c_float, sizeof
16
+ from enum import Enum
17
+ from typing import TYPE_CHECKING, Optional, Union
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore
22
+
23
+
24
+ class ParameterFormat(Enum):
25
+ Float = c_float
26
+
27
+ @property
28
+ def size(self) -> int:
29
+ """
30
+ Number of bytes required for this data type
31
+
32
+ Returns:
33
+ Integer > 0
34
+ """
35
+ return sizeof(self.value)
36
+
37
+
38
+ def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
39
+ """
40
+ Computes the effective axis dimension to use when generating dummy inputs for the ONNX export.
41
+ Args:
42
+ dimension: The axis dimension coming from the inputs (may be <= 0 when the axis is dynamic).
43
+ fixed_dimension: The fixed value to fall back to when `dimension` is dynamic.
44
+ num_token_to_add: The number of tokens (e.g. special tokens) to subtract from the resulting dimension.
45
+
46
+ Returns:
47
+ The effective axis dimension.
48
+ """
49
+ # < 0 is possible if using a dynamic axis
50
+ if dimension <= 0:
51
+ dimension = fixed_dimension
52
+
53
+ dimension -= num_token_to_add
54
+ return dimension
55
+
56
+
57
+ def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int:
58
+ """
59
+ Compute the size taken by all the parameters in the given storage format when serializing the model
60
+
61
+ Args:
62
+ num_parameters: Number of parameters to be saved
63
+ dtype: The data format each parameter will be saved
64
+
65
+ Returns:
66
+ Size (in bytes) taken to save all the parameters
67
+ """
68
+ return num_parameters * dtype.size
69
+
70
+
71
+ def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]:
72
+ """
73
+ Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`.
74
+
75
+ Args:
76
+ model_name (`str`): Name of the model for which a preprocessor is loaded.
77
+
78
+ Returns:
79
+ `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`:
80
+ If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is
81
+ returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns
82
+ `None` if no preprocessor is found.
83
+ """
84
+ # Avoid circular imports by only importing this here.
85
+ from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore
86
+
87
+ try:
88
+ return AutoProcessor.from_pretrained(model_name)
89
+ except (ValueError, OSError, KeyError):
90
+ tokenizer, feature_extractor = None, None
91
+ try:
92
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
93
+ except (OSError, KeyError):
94
+ pass
95
+ try:
96
+ feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
97
+ except (OSError, KeyError):
98
+ pass
99
+
100
+ if tokenizer is not None and feature_extractor is not None:
101
+ raise ValueError(
102
+ f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor."
103
+ )
104
+ elif tokenizer is None and feature_extractor is None:
105
+ return None
106
+ elif tokenizer is not None:
107
+ return tokenizer
108
+ else:
109
+ return feature_extractor
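A small, hedged example of how the helpers above are typically exercised when building dummy inputs for the export; the checkpoint name is illustrative only.

# Sketch under the assumptions stated above.
from transformers.onnx.utils import compute_effective_axis_dimension, get_preprocessor

# Fall back to a fixed batch size of 2 when the axis is dynamic (-1).
batch = compute_effective_axis_dimension(-1, fixed_dimension=2)

# Fall back to a fixed sequence length of 8, reserving room for 2 special tokens.
seq_len = compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)

preprocessor = get_preprocessor("distilbert-base-uncased")  # tokenizer, feature extractor, processor, or None
print(batch, seq_len, type(preprocessor).__name__)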
venv/lib/python3.10/site-packages/transformers/quantizers/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from .auto import AutoHfQuantizer, AutoQuantizationConfig
15
+ from .base import HfQuantizer
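The names re-exported here are the public entry points used elsewhere in the library; a one-line sketch of the typical import, shown only for orientation.

from transformers.quantizers import AutoHfQuantizer, AutoQuantizationConfig, HfQuantizer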
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (308 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc ADDED
Binary file (4.58 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc ADDED
Binary file (9.79 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc ADDED
Binary file (3.46 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc ADDED
Binary file (3.67 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc ADDED
Binary file (6.96 kB). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc ADDED
Binary file (602 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/quantizers/auto.py ADDED
@@ -0,0 +1,161 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import warnings
15
+ from typing import Dict, Optional, Union
16
+
17
+ from ..models.auto.configuration_auto import AutoConfig
18
+ from ..utils.quantization_config import (
19
+ AqlmConfig,
20
+ AwqConfig,
21
+ BitsAndBytesConfig,
22
+ GPTQConfig,
23
+ QuantizationConfigMixin,
24
+ QuantizationMethod,
25
+ QuantoConfig,
26
+ )
27
+ from .quantizer_aqlm import AqlmHfQuantizer
28
+ from .quantizer_awq import AwqQuantizer
29
+ from .quantizer_bnb_4bit import Bnb4BitHfQuantizer
30
+ from .quantizer_bnb_8bit import Bnb8BitHfQuantizer
31
+ from .quantizer_gptq import GptqHfQuantizer
32
+ from .quantizer_quanto import QuantoHfQuantizer
33
+
34
+
35
+ AUTO_QUANTIZER_MAPPING = {
36
+ "awq": AwqQuantizer,
37
+ "bitsandbytes_4bit": Bnb4BitHfQuantizer,
38
+ "bitsandbytes_8bit": Bnb8BitHfQuantizer,
39
+ "gptq": GptqHfQuantizer,
40
+ "aqlm": AqlmHfQuantizer,
41
+ "quanto": QuantoHfQuantizer,
42
+ }
43
+
44
+ AUTO_QUANTIZATION_CONFIG_MAPPING = {
45
+ "awq": AwqConfig,
46
+ "bitsandbytes_4bit": BitsAndBytesConfig,
47
+ "bitsandbytes_8bit": BitsAndBytesConfig,
48
+ "gptq": GPTQConfig,
49
+ "aqlm": AqlmConfig,
50
+ "quanto": QuantoConfig,
51
+ }
52
+
53
+
54
+ class AutoQuantizationConfig:
55
+ """
56
+ The Auto-HF quantization config class that takes care of automatically dispatching to the correct
57
+ quantization config given a quantization config stored in a dictionary.
58
+ """
59
+
60
+ @classmethod
61
+ def from_dict(cls, quantization_config_dict: Dict):
62
+ quant_method = quantization_config_dict.get("quant_method", None)
63
+ # We need a special care for bnb models to make sure everything is BC ..
64
+ if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
65
+ suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
66
+ quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
67
+ elif quant_method is None:
68
+ raise ValueError(
69
+ "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
70
+ )
71
+
72
+ if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
73
+ raise ValueError(
74
+ f"Unknown quantization type, got {quant_method} - supported types are:"
75
+ f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
76
+ )
77
+
78
+ target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
79
+ return target_cls.from_dict(quantization_config_dict)
80
+
81
+ @classmethod
82
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
83
+ model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
84
+ if getattr(model_config, "quantization_config", None) is None:
85
+ raise ValueError(
86
+ f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
87
+ )
88
+ quantization_config_dict = model_config.quantization_config
89
+ quantization_config = cls.from_dict(quantization_config_dict)
90
+ # Update with potential kwargs that are passed through from_pretrained.
91
+ quantization_config.update(kwargs)
92
+ return quantization_config
93
+
94
+
95
+ class AutoHfQuantizer:
96
+ """
97
+ The Auto-HF quantizer class that takes care of automatically instantiating to the correct
98
+ `HfQuantizer` given the `QuantizationConfig`.
99
+ """
100
+
101
+ @classmethod
102
+ def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
103
+ # Convert it to a QuantizationConfig if the q_config is a dict
104
+ if isinstance(quantization_config, dict):
105
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
106
+
107
+ quant_method = quantization_config.quant_method
108
+
109
+ # Again, we need a special care for bnb as we have a single quantization config
110
+ # class for both 4-bit and 8-bit quantization
111
+ if quant_method == QuantizationMethod.BITS_AND_BYTES:
112
+ if quantization_config.load_in_8bit:
113
+ quant_method += "_8bit"
114
+ else:
115
+ quant_method += "_4bit"
116
+
117
+ if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
118
+ raise ValueError(
119
+ f"Unknown quantization type, got {quant_method} - supported types are:"
120
+ f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
121
+ )
122
+
123
+ target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
124
+ return target_cls(quantization_config, **kwargs)
125
+
126
+ @classmethod
127
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
128
+ quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
129
+ return cls.from_config(quantization_config)
130
+
131
+ @classmethod
132
+ def merge_quantization_configs(
133
+ cls,
134
+ quantization_config: Union[dict, QuantizationConfigMixin],
135
+ quantization_config_from_args: Optional[QuantizationConfigMixin],
136
+ ):
137
+ """
138
+ Handles situations where both a quantization_config from the args and a quantization_config from the model config are present.
139
+ """
140
+ if quantization_config_from_args is not None:
141
+ warning_msg = (
142
+ "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
143
+ " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
144
+ )
145
+ else:
146
+ warning_msg = ""
147
+
148
+ if isinstance(quantization_config, dict):
149
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
150
+
151
+ if isinstance(quantization_config, (GPTQConfig, AwqConfig)) and quantization_config_from_args is not None:
152
+ # special case for GPTQ / AWQ config collision
153
+ loading_attr_dict = quantization_config_from_args.get_loading_attributes()
154
+ for attr, val in loading_attr_dict.items():
155
+ setattr(quantization_config, attr, val)
156
+ warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
157
+
158
+ if warning_msg != "":
159
+ warnings.warn(warning_msg)
160
+
161
+ return quantization_config
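A hedged sketch of the dispatch flow implemented above: a serialized quantization config (here the bitsandbytes 4-bit backward-compatibility path) is resolved to a concrete config class, which is then mapped to the matching quantizer. The dictionary values are illustrative.

from transformers.quantizers import AutoHfQuantizer, AutoQuantizationConfig

config_dict = {"load_in_4bit": True, "bnb_4bit_quant_type": "nf4"}  # illustrative values
quantization_config = AutoQuantizationConfig.from_dict(config_dict)  # -> BitsAndBytesConfig
quantizer = AutoHfQuantizer.from_config(quantization_config)         # -> Bnb4BitHfQuantizer
print(type(quantization_config).__name__, type(quantizer).__name__)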
venv/lib/python3.10/site-packages/transformers/quantizers/base.py ADDED
@@ -0,0 +1,213 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from abc import ABC, abstractmethod
15
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
16
+
17
+ from ..utils import is_torch_available
18
+ from ..utils.quantization_config import QuantizationConfigMixin
19
+
20
+
21
+ if TYPE_CHECKING:
22
+ from ..modeling_utils import PreTrainedModel
23
+
24
+ if is_torch_available():
25
+ import torch
26
+
27
+
28
+ class HfQuantizer(ABC):
29
+ """
30
+ Abstract class of the HuggingFace quantizer. For now, it supports quantizing HF transformers models for inference.
31
+ This class is used only for transformers.PreTrainedModel.from_pretrained and cannot be easily used outside the scope of that method
32
+ yet.
33
+
34
+ Attributes
35
+ quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
36
+ The quantization config that defines the quantization parameters of your model that you want to quantize.
37
+ modules_to_not_convert (`List[str]`, *optional*):
38
+ The list of module names to not convert when quantizing the model.
39
+ required_packages (`List[str]`, *optional*):
40
+ The list of required pip packages to install prior to using the quantizer
41
+ requires_calibration (`bool`):
42
+ Whether the quantization method requires to calibrate the model before using it.
43
+ requires_parameters_quantization (`bool`):
44
+ Whether the quantization method requires to create a new Parameter. For example, for bitsandbytes, it is
45
+ required to create a new xxxParameter in order to properly quantize the model.
46
+ """
47
+
48
+ requires_calibration = False
49
+ required_packages = None
50
+ requires_parameters_quantization = False
51
+
52
+ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
53
+ self.quantization_config = quantization_config
54
+
55
+ # -- Handle extra kwargs below --
56
+ self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
57
+ self.pre_quantized = kwargs.pop("pre_quantized", True)
58
+
59
+ if not self.pre_quantized and self.requires_calibration:
60
+ raise ValueError(
61
+ f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
62
+ f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
63
+ f"pass `pre_quantized=True` while knowing what you are doing."
64
+ )
65
+
66
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
67
+ """
68
+ Some quantization methods require explicitly setting the dtype of the model to a
69
+ target dtype. You need to override this method in case you want to make sure that behavior is
70
+ preserved
71
+
72
+ Args:
73
+ torch_dtype (`torch.dtype`):
74
+ The input dtype that is passed in `from_pretrained`
75
+ """
76
+ return torch_dtype
77
+
78
+ def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
79
+ """
80
+ Override this method if you want to override the existing device map with a new
82
+ one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
83
+ passed, the device_map is set to `"auto"`.
83
+
84
+ Args:
85
+ device_map (`Union[dict, str]`, *optional*):
86
+ The device_map that is passed through the `from_pretrained` method.
87
+ """
88
+ return device_map
89
+
90
+ def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
91
+ """
92
+ Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
93
+ to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
94
+ to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.
95
+
96
+ Args:
97
+ torch_dtype (`torch.dtype`, *optional*):
98
+ The torch_dtype that is used to compute the device_map.
99
+ """
100
+ return torch_dtype
101
+
102
+ def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
103
+ """
104
+ Override this method if you want to adjust the `missing_keys`.
105
+
106
+ Args:
107
+ missing_keys (`List[str]`, *optional*):
108
+ The list of missing keys in the checkpoint compared to the state dict of the model
109
+ """
110
+ return missing_keys
111
+
112
+ def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
113
+ """
114
+ returns dtypes for modules that are not quantized - used for the computation of the device_map in case
115
+ one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
116
+ in `_process_model_before_weight_loading`.
117
+
118
+ Args:
119
+ model (`~transformers.PreTrainedModel`):
120
+ The model to quantize
121
+ torch_dtype (`torch.dtype`):
122
+ The dtype passed in `from_pretrained` method.
123
+ """
124
+
125
+ return {
126
+ name: torch_dtype
127
+ for name, _ in model.named_parameters()
128
+ if any(m in name for m in self.modules_to_not_convert)
129
+ }
130
+
131
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
132
+ """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
133
+ return max_memory
134
+
135
+ def check_quantized_param(
136
+ self,
137
+ model: "PreTrainedModel",
138
+ param_value: "torch.Tensor",
139
+ param_name: str,
140
+ state_dict: Dict[str, Any],
141
+ **kwargs,
142
+ ) -> bool:
143
+ """
144
+ checks if a loaded state_dict component is part of quantized param + some validation; only defined if
145
+ requires_parameters_quantization == True for quantization methods that require to create a new parameters
146
+ for quantization.
147
+ """
148
+ return False
149
+
150
+ def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
151
+ """
152
+ takes needed components from state_dict and creates quantized param; only applicable if
153
+ requires_parameters_quantization == True
154
+ """
155
+ if not self.requires_parameters_quantization:
156
+ raise AttributeError(
157
+ f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
158
+ )
159
+
160
+ def validate_environment(self, *args, **kwargs):
161
+ """
162
+ This method is used to check for potential conflicts with arguments that are
163
+ passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers.
164
+ If no explicit checks are needed, simply return nothing.
165
+ """
166
+ return
167
+
168
+ def preprocess_model(self, model: "PreTrainedModel", **kwargs):
169
+ """
170
+ Setting model attributes and/or converting model before weights loading. At this point
171
+ the model should be initialized on the meta device so you can freely manipulate the skeleton
172
+ of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
173
+
174
+ Args:
175
+ model (`~transformers.PreTrainedModel`):
176
+ The model to quantize
177
+ kwargs (`dict`, *optional*):
178
+ The keyword arguments that are passed along `_process_model_before_weight_loading`.
179
+ """
180
+ model.is_quantized = True
181
+ model.quantization_method = self.quantization_config.quant_method
182
+ return self._process_model_before_weight_loading(model, **kwargs)
183
+
184
+ def postprocess_model(self, model: "PreTrainedModel", **kwargs):
185
+ """
186
+ Post-process the model post weights loading.
187
+ Make sure to override the abstract method `_process_model_after_weight_loading`.
188
+
189
+ Args:
190
+ model (`~transformers.PreTrainedModel`):
191
+ The model to quantize
192
+ kwargs (`dict`, *optional*):
193
+ The keyword arguments that are passed along `_process_model_after_weight_loading`.
194
+ """
195
+ return self._process_model_after_weight_loading(model, **kwargs)
196
+
197
+ @abstractmethod
198
+ def _process_model_before_weight_loading(self, model, **kwargs):
199
+ ...
200
+
201
+ @abstractmethod
202
+ def _process_model_after_weight_loading(self, model, **kwargs):
203
+ ...
204
+
205
+ @property
206
+ @abstractmethod
207
+ def is_serializable(self):
208
+ ...
209
+
210
+ @property
211
+ @abstractmethod
212
+ def is_trainable(self):
213
+ ...
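To make the abstract surface above concrete, here is a minimal, hypothetical subclass. It performs no real quantization and exists only to show which members a new quantizer must provide.

from transformers.quantizers.base import HfQuantizer


class NoOpQuantizer(HfQuantizer):
    """Toy quantizer used only to illustrate the required overrides."""

    requires_calibration = False

    def validate_environment(self, *args, **kwargs):
        return  # nothing to check for this toy example

    def _process_model_before_weight_loading(self, model, **kwargs):
        # Real quantizers swap nn.Linear modules here while the model is still on the meta device.
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    @property
    def is_serializable(self):
        return True

    @property
    def is_trainable(self):
        return False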
venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ from typing import TYPE_CHECKING, Optional
16
+
17
+ from packaging import version
18
+
19
+ from .base import HfQuantizer
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ..modeling_utils import PreTrainedModel
24
+
25
+ from ..integrations import replace_with_aqlm_linear
26
+ from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging
27
+ from ..utils.quantization_config import QuantizationConfigMixin
28
+
29
+
30
+ if is_torch_available():
31
+ import torch
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ class AqlmHfQuantizer(HfQuantizer):
37
+ """
38
+ Quantizer of the AQLM method. Enables the loading of prequantized models.
39
+ """
40
+
41
+ requires_calibration = True
42
+ required_packages = ["aqlm"]
43
+ optimum_quantizer = None
44
+
45
+ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
46
+ super().__init__(quantization_config, **kwargs)
47
+ self.quantization_config = quantization_config
48
+
49
+ def validate_environment(self, *args, **kwargs):
50
+ if not is_accelerate_available():
51
+ raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`")
52
+
53
+ if not is_aqlm_available():
54
+ raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`")
55
+
56
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
57
+ if torch_dtype is None:
58
+ if torch.cuda.is_available():
59
+ torch_dtype = torch.float16
60
+ logger.info(
61
+ "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually."
62
+ )
63
+ else:
64
+ torch_dtype = torch.float32
65
+ logger.info(
66
+ "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually."
67
+ )
68
+ return torch_dtype
69
+
70
+ def _process_model_before_weight_loading(
71
+ self,
72
+ model: "PreTrainedModel",
73
+ **kwargs,
74
+ ):
75
+ replace_with_aqlm_linear(
76
+ model,
77
+ quantization_config=self.quantization_config,
78
+ linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize,
79
+ )
80
+ model.config.quantization_config = self.quantization_config
81
+
82
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
83
+ return model
84
+
85
+ @property
86
+ def is_trainable(self, model: Optional["PreTrainedModel"] = None):
87
+ aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2")
88
+ if aqlm_supports_training:
89
+ return True
90
+ else:
91
+ logger.warning(
92
+ f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`"
93
+ )
94
+ return False
95
+
96
+ @property
97
+ def is_serializable(self):
98
+ return True
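A hedged sketch of how this quantizer is reached in practice: because `requires_calibration` is True, AQLM models must already be quantized, so the quantizer is only exercised through `from_pretrained`. The repo id below is hypothetical, and the `aqlm` and `accelerate` packages are assumed to be installed.

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-aqlm-quantized-model",  # hypothetical AQLM checkpoint
    device_map="auto",
)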
venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py ADDED
@@ -0,0 +1,124 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib.metadata
15
+ from typing import TYPE_CHECKING
16
+
17
+ from packaging import version
18
+
19
+ from .base import HfQuantizer
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ..modeling_utils import PreTrainedModel
24
+
25
+ from ..utils import is_accelerate_available, is_auto_awq_available, is_torch_available, logging
26
+ from ..utils.quantization_config import AWQLinearVersion
27
+
28
+
29
+ if is_torch_available():
30
+ import torch
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ class AwqQuantizer(HfQuantizer):
36
+ """
37
+ 4-bit quantization for Activation-aware Weight Quantization(AWQ) (https://arxiv.org/abs/2306.00978)
38
+ """
39
+
40
+ # AWQ requires data calibration - we support only inference
41
+ requires_calibration = True
42
+
43
+ required_packages = ["awq", "accelerate"]
44
+
45
+ def __init__(self, quantization_config, **kwargs):
46
+ super().__init__(quantization_config, **kwargs)
47
+
48
+ def validate_environment(self, device_map, **kwargs):
49
+ if not torch.cuda.is_available():
50
+ raise RuntimeError("GPU is required to run AWQ quantized model.")
51
+
52
+ if not is_auto_awq_available():
53
+ raise ImportError("Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)")
54
+
55
+ if not is_accelerate_available():
56
+ raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)")
57
+
58
+ if device_map is None:
59
+ logger.warning_once(
60
+ "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set "
61
+ "your model on a GPU device in order to run your model."
62
+ )
63
+ elif device_map is not None:
64
+ if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
65
+ raise ValueError(
66
+ "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device."
67
+ " This is not supported. Please remove the CPU or disk device from the device_map."
68
+ )
69
+
70
+ def update_torch_dtype(self, torch_dtype):
71
+ if torch_dtype is None:
72
+ torch_dtype = torch.float16
73
+ elif torch_dtype != torch.float16:
74
+ logger.warning("We suggest you to set `torch_dtype=torch.float16` for better efficiency with AWQ.")
75
+ return torch_dtype
76
+
77
+ def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
78
+ from ..integrations import get_keys_to_not_convert, replace_with_awq_linear
79
+
80
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
81
+
82
+ if self.quantization_config.modules_to_not_convert is not None:
83
+ self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
84
+
85
+ model, has_been_replaced = replace_with_awq_linear(
86
+ model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert
87
+ )
88
+
89
+ if not has_been_replaced:
90
+ logger.warning(
91
+ "You are loading an AWQ model but no linear modules were found in your model."
92
+ " Please double check your model architecture, or submit an issue on github if you think this is a bug."
93
+ )
94
+
95
+ def _process_model_after_weight_loading(self, model):
96
+ if self.quantization_config.do_fuse:
97
+ from ..integrations import fuse_awq_modules
98
+
99
+ model = fuse_awq_modules(model, self.quantization_config)
100
+ model._awq_is_fused = True # TODO: consider storing this flag in model.config instead
101
+
102
+ if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
103
+ from ..integrations import post_init_awq_exllama_modules
104
+
105
+ model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config)
106
+
107
+ @property
108
+ def is_serializable(self):
109
+ # AWQ through auto-awq has been always serializable, except if the model is fused.
110
+ if self.quantization_config.do_fuse:
111
+ logger.warning("You cannot save an AWQ model that uses fused modules!")
112
+ return False
113
+
114
+ if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
115
+ logger.warning("You cannot save an AWQ model that uses Exllama backend!")
116
+ return False
117
+
118
+ return True
119
+
120
+ @property
121
+ def is_trainable(self):
122
+ # AWQ supports PEFT fine-tuning from version 0.2.0
123
+ MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"
124
+ return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
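A hedged sketch of loading a prequantized AWQ checkpoint; the repo id is illustrative, and a CUDA device plus the `autoawq` and `accelerate` packages are assumed (see `validate_environment` above).

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-awq-quantized-model",  # hypothetical AWQ checkpoint
    device_map="cuda:0",
    torch_dtype=torch.float16,  # fp16 is the dtype suggested by update_torch_dtype above
)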
venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py ADDED
@@ -0,0 +1,317 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
16
+
17
+ from packaging import version
18
+
19
+ from .base import HfQuantizer
20
+ from .quantizers_utils import get_module_from_name
21
+
22
+
23
+ if TYPE_CHECKING:
24
+ from ..modeling_utils import PreTrainedModel
25
+
26
+ from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging
27
+
28
+
29
+ if is_torch_available():
30
+ import torch
31
+
32
+ from ..pytorch_utils import Conv1D
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ class Bnb4BitHfQuantizer(HfQuantizer):
38
+ """
39
+ 4-bit quantization from bitsandbytes.py quantization method:
40
+ before loading: converts transformer layers into Linear4bit
41
+ during loading: loads the 16bit weight and passes it to the layer object
+ after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call
42
+ saving:
43
+ from state dict, as usual; saves weights and `quant_state` components
44
+ loading:
45
+ need to locate `quant_state` components and pass to Param4bit constructor
46
+ """
47
+
48
+ use_keep_in_fp32_modules = True
49
+ requires_parameters_quantization = True
50
+ requires_calibration = False
51
+
52
+ required_packages = ["bitsandbytes", "accelerate"]
53
+
54
+ def __init__(self, quantization_config, **kwargs):
55
+ super().__init__(quantization_config, **kwargs)
56
+
57
+ if self.quantization_config.llm_int8_skip_modules is not None:
58
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
59
+
60
+ def validate_environment(self, *args, **kwargs):
61
+ if not (is_accelerate_available() and is_bitsandbytes_available()):
62
+ raise ImportError(
63
+ "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` "
64
+ "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`"
65
+ )
66
+
67
+ if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
68
+ raise ValueError(
69
+ "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
70
+ " sure the weights are in PyTorch format."
71
+ )
72
+
73
+ if not torch.cuda.is_available():
74
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
75
+
76
+ device_map = kwargs.get("device_map", None)
77
+ if (
78
+ device_map is not None
79
+ and isinstance(device_map, dict)
80
+ and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
81
+ ):
82
+ device_map_without_lm_head = {
83
+ key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
84
+ }
85
+ if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
86
+ raise ValueError(
87
+ """
88
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the
89
+ quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules
90
+ in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to
91
+ `from_pretrained`. Check
92
+ https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
93
+ for more details.
94
+ """
95
+ )
96
+
97
+ if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"):
98
+ raise ValueError(
99
+ "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training"
100
+ " make sure you have the latest version of `bitsandbytes` installed"
101
+ )
102
+
103
+ def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
104
+ if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"):
105
+ from accelerate.utils import CustomDtype
106
+
107
+ if target_dtype != torch.int8:
108
+ logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization")
109
+ return CustomDtype.INT4
110
+ else:
111
+ raise ValueError(
112
+ "You are using `device_map='auto'` on a 4bit loaded version of the model. To automatically compute"
113
+ " the appropriate device map, you should upgrade your `accelerate` library,"
114
+ "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map"
115
+ "calculation. You may encounter unexpected behavior, or pass your own device map"
116
+ )
117
+
118
+ def check_quantized_param(
119
+ self,
120
+ model: "PreTrainedModel",
121
+ param_value: "torch.Tensor",
122
+ param_name: str,
123
+ state_dict: Dict[str, Any],
124
+ **kwargs,
125
+ ) -> bool:
126
+ import bitsandbytes as bnb
127
+
128
+ module, tensor_name = get_module_from_name(model, param_name)
129
+ if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit):
130
+ # Add here check for loaded components' dtypes once serialization is implemented
131
+ return True
132
+ elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias":
133
+ # bias could be loaded by regular set_module_tensor_to_device() from accelerate,
134
+ # but it would wrongly use uninitialized weight there.
135
+ return True
136
+ else:
137
+ return False
138
+
139
+ def create_quantized_param(
140
+ self,
141
+ model: "PreTrainedModel",
142
+ param_value: "torch.Tensor",
143
+ param_name: str,
144
+ target_device: "torch.device",
145
+ state_dict: Dict[str, Any],
146
+ unexpected_keys: Optional[List[str]] = None,
147
+ ):
148
+ """
149
+ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
150
+ """
151
+ import bitsandbytes as bnb
152
+
153
+ module, tensor_name = get_module_from_name(model, param_name)
154
+
155
+ if tensor_name not in module._parameters:
156
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
157
+
158
+ old_value = getattr(module, tensor_name)
159
+
160
+ if tensor_name == "bias":
161
+ if param_value is None:
162
+ new_value = old_value.to(target_device)
163
+ else:
164
+ new_value = param_value.to(target_device)
165
+
166
+ new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad)
167
+ module._parameters[tensor_name] = new_value
168
+ return
169
+
170
+ if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
171
+ raise ValueError("this function only loads `Linear4bit components`")
172
+ if (
173
+ old_value.device == torch.device("meta")
174
+ and target_device not in ["meta", torch.device("meta")]
175
+ and param_value is None
176
+ ):
177
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
178
+
179
+ # construct `new_value` for the module._parameters[tensor_name]:
180
+ if self.pre_quantized:
181
+ # 4bit loading. Collecting components for restoring quantized weight
182
+ # This can be expanded to make a universal call for any quantized weight loading
183
+
184
+ if not self.is_serializable:
185
+ raise ValueError(
186
+ "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
187
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
188
+ )
189
+
190
+ if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
191
+ param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
192
+ ):
193
+ raise ValueError(
194
+ f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
195
+ )
196
+
197
+ quantized_stats = {}
198
+ for k, v in state_dict.items():
199
+ if param_name + "." in k:
200
+ quantized_stats[k] = v
201
+ if unexpected_keys is not None and k in unexpected_keys:
202
+ unexpected_keys.remove(k)
203
+
204
+ new_value = bnb.nn.Params4bit.from_prequantized(
205
+ data=param_value,
206
+ quantized_stats=quantized_stats,
207
+ requires_grad=False,
208
+ device=target_device,
209
+ )
210
+ else:
211
+ new_value = param_value.to("cpu")
212
+
213
+ # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
214
+ # Since weights are saved in the correct "orientation", we skip transposing when loading.
215
+ if issubclass(module.source_cls, Conv1D):
216
+ new_value = new_value.T
217
+
218
+ kwargs = old_value.__dict__
219
+ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device)
220
+
221
+ module._parameters[tensor_name] = new_value
222
+
223
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory
224
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
225
+ # need more space for buffers that are created during quantization
226
+ max_memory = {key: val * 0.90 for key, val in max_memory.items()}
227
+ return max_memory
228
+
229
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_torch_dtype
230
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
231
+ if torch_dtype is None:
232
+ # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
233
+ logger.info(
234
+ "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
235
+ "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
236
+ "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
237
+ " torch_dtype=torch.float16 to remove this warning.",
238
+ torch_dtype,
239
+ )
240
+ torch_dtype = torch.float16
241
+ return torch_dtype
242
+
243
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_device_map
244
+ def update_device_map(self, device_map):
245
+ if device_map is None:
246
+ device_map = {"": torch.cuda.current_device()}
247
+ logger.info(
248
+ "The device_map was not initialized. "
249
+ "Setting device_map to {'':torch.cuda.current_device()}. "
250
+ "If you want to use the model for inference, please set device_map ='auto' "
251
+ )
252
+ return device_map
253
+
254
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading
255
+ def _process_model_before_weight_loading(
256
+ self,
257
+ model: "PreTrainedModel",
258
+ device_map,
259
+ keep_in_fp32_modules: List[str] = [],
260
+ **kwargs,
261
+ ):
262
+ from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear
263
+
264
+ load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
265
+
266
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
267
+ if self.quantization_config.llm_int8_skip_modules is None:
268
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
269
+ else:
270
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
271
+
272
+ if not isinstance(self.modules_to_not_convert, list):
273
+ self.modules_to_not_convert = [self.modules_to_not_convert]
274
+
275
+ self.modules_to_not_convert.extend(keep_in_fp32_modules)
276
+
277
+ # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
278
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
279
+ keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
280
+
281
+ if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
282
+ raise ValueError(
283
+ "If you want to offload some keys to `cpu` or `disk`, you need to set "
284
+ "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
285
+ " converted to 8-bit but kept in 32-bit."
286
+ )
287
+ self.modules_to_not_convert.extend(keys_on_cpu)
288
+
289
+ model = replace_with_bnb_linear(
290
+ model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
291
+ )
292
+ # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here
293
+
294
+ model.config.quantization_config = self.quantization_config
295
+
296
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit
297
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
298
+ model.is_loaded_in_4bit = True
299
+ model.is_4bit_serializable = self.is_serializable
300
+ return model
301
+
302
+ @property
303
+ def is_serializable(self):
304
+ _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3")
305
+
306
+ if not _is_4bit_serializable:
307
+ logger.warning(
308
+ "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. "
309
+ "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed."
310
+ )
311
+ return False
312
+
313
+ return True
314
+
315
+ @property
316
+ def is_trainable(self) -> bool:
317
+ return True
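Taken together with `is_serializable` and `is_trainable` above, a short end-to-end sketch of the 4-bit path this quantizer backs; saving requires `bitsandbytes>=0.41.3`, and the checkpoint and output directory names are placeholders.

    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",  # placeholder checkpoint
        quantization_config=BitsAndBytesConfig(load_in_4bit=True),
        device_map="auto",
    )

    # Serializing the 4-bit weights; per is_serializable above this needs bitsandbytes>=0.41.3.
    model.save_pretrained("opt-350m-4bit")  # placeholder output directory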
venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py ADDED
@@ -0,0 +1,285 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import importlib
15
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
16
+
17
+ from packaging import version
18
+
19
+ from .base import HfQuantizer
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ..modeling_utils import PreTrainedModel
24
+
25
+ from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging
26
+ from .quantizers_utils import get_module_from_name
27
+
28
+
29
+ if is_torch_available():
30
+ import torch
31
+
32
+ from ..pytorch_utils import Conv1D
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ class Bnb8BitHfQuantizer(HfQuantizer):
38
+ """
39
+ 8-bit quantization from bitsandbytes quantization method:
40
+ before loading: converts transformer layers into Linear8bitLt; during loading: loads the 16-bit weights and passes them to the
41
+ layer object; after loading: quantizes individual weights in Linear8bitLt into 8-bit at the first .cuda() call
42
+ saving:
43
+ from state dict, as usual; saves weights and 'SCB' component
44
+ loading:
45
+ need to locate SCB component and pass to the Linear8bitLt object
46
+ """
47
+
48
+ use_keep_in_fp32_modules = True
49
+ requires_parameters_quantization = True
50
+ requires_calibration = False
51
+
52
+ required_packages = ["bitsandbytes", "accelerate"]
53
+
54
+ def __init__(self, quantization_config, **kwargs):
55
+ super().__init__(quantization_config, **kwargs)
56
+
57
+ if self.quantization_config.llm_int8_skip_modules is not None:
58
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
59
+
60
+ def validate_environment(self, *args, **kwargs):
61
+ if not (is_accelerate_available() and is_bitsandbytes_available()):
62
+ raise ImportError(
63
+ "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` "
64
+ "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`"
65
+ )
66
+
67
+ if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
68
+ raise ValueError(
69
+ "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
70
+ " sure the weights are in PyTorch format."
71
+ )
72
+
73
+ if not torch.cuda.is_available():
74
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
75
+
76
+ device_map = kwargs.get("device_map", None)
77
+ if (
78
+ device_map is not None
79
+ and isinstance(device_map, dict)
80
+ and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
81
+ ):
82
+ device_map_without_lm_head = {
83
+ key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
84
+ }
85
+ if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
86
+ raise ValueError(
87
+ """
88
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the
89
+ quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules
90
+ in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to
91
+ `from_pretrained`. Check
92
+ https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
93
+ for more details.
94
+ """
95
+ )
96
+
97
+ if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"):
98
+ raise ValueError(
99
+ "You have a version of `bitsandbytes` that is not compatible with 8-bit inference and training."
100
+ " Make sure you have the latest version of `bitsandbytes` installed."
101
+ )
102
+
103
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
104
+ # need more space for buffers that are created during quantization
105
+ max_memory = {key: val * 0.90 for key, val in max_memory.items()}
106
+ return max_memory
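A worked example of the 10% headroom applied above; the numbers are illustrative only.

    # adjust_max_memory() trims each device budget by 10% to leave room for the
    # temporary buffers created while quantizing.
    max_memory = {0: 16_000_000_000, "cpu": 64_000_000_000}
    adjusted = {key: val * 0.90 for key, val in max_memory.items()}
    # adjusted == {0: 14_400_000_000.0, "cpu": 57_600_000_000.0}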
107
+
108
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
109
+ if torch_dtype is None:
110
+ # We force the `dtype` to be float16; this is a requirement of `bitsandbytes`
111
+ logger.info(
112
+ "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
113
+ "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
114
+ "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
115
+ " torch_dtype=torch.float16 to remove this warning.",
116
+ torch_dtype,
117
+ )
118
+ torch_dtype = torch.float16
119
+ return torch_dtype
120
+
121
+ def update_device_map(self, device_map):
122
+ if device_map is None:
123
+ device_map = {"": torch.cuda.current_device()}
124
+ logger.info(
125
+ "The device_map was not initialized. "
126
+ "Setting device_map to {'':torch.cuda.current_device()}. "
127
+ "If you want to use the model for inference, please set device_map='auto'."
128
+ )
129
+ return device_map
130
+
131
+ def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
132
+ if target_dtype != torch.int8:
133
+ logger.info(f"target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization")
134
+ return torch.int8
135
+
136
+ def check_quantized_param(
137
+ self,
138
+ model: "PreTrainedModel",
139
+ param_value: "torch.Tensor",
140
+ param_name: str,
141
+ state_dict: Dict[str, Any],
142
+ **kwargs,
143
+ ):
144
+ import bitsandbytes as bnb
145
+
146
+ module, tensor_name = get_module_from_name(model, param_name)
147
+ if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params):
148
+ if self.pre_quantized:
149
+ if param_name.replace("weight", "SCB") not in state_dict.keys():
150
+ raise ValueError("Missing quantization component `SCB`")
151
+ if param_value.dtype != torch.int8:
152
+ raise ValueError(
153
+ f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
154
+ )
155
+ return True
156
+ return False
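A sketch of the state-dict layout this check expects for a pre-quantized 8-bit checkpoint: the int8 weight must be accompanied by its quantization statistics under the matching `SCB` key. The parameter name and shapes below are hypothetical.

    import torch

    param_name = "model.layers.0.self_attn.q_proj.weight"  # hypothetical name
    state_dict = {
        param_name: torch.empty(4096, 4096, dtype=torch.int8),   # pre-quantized weight
        param_name.replace("weight", "SCB"): torch.empty(4096),  # quantization statistics
    }
    assert param_name.replace("weight", "SCB") in state_dict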
157
+
158
+ def create_quantized_param(
159
+ self,
160
+ model: "PreTrainedModel",
161
+ param_value: "torch.Tensor",
162
+ param_name: str,
163
+ target_device: "torch.device",
164
+ state_dict: Dict[str, Any],
165
+ unexpected_keys: Optional[List[str]] = None,
166
+ ):
167
+ """
168
+ Combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device().
170
+ Needs auxiliary items from the state dict; if found, removes them from unexpected_keys.
170
+ """
171
+ import bitsandbytes as bnb
172
+
173
+ fp16_statistics_key = param_name.replace("weight", "SCB")
174
+ fp16_weights_format_key = param_name.replace("weight", "weight_format")
175
+
176
+ fp16_statistics = state_dict.get(fp16_statistics_key, None)
177
+ fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
178
+
179
+ module, tensor_name = get_module_from_name(model, param_name)
180
+ if tensor_name not in module._parameters:
181
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
182
+
183
+ old_value = getattr(module, tensor_name)
184
+
185
+ if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
186
+ raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
187
+ if (
188
+ old_value.device == torch.device("meta")
189
+ and target_device not in ["meta", torch.device("meta")]
190
+ and param_value is None
191
+ ):
192
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put on {target_device}.")
193
+
194
+ new_value = param_value.to("cpu")
195
+ if self.pre_quantized and not self.is_serializable:
196
+ raise ValueError(
197
+ "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
198
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
199
+ )
200
+
201
+ # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
202
+ # Since weights are saved in the correct "orientation", we skip transposing when loading.
203
+ if issubclass(module.source_cls, Conv1D):
204
+ if fp16_statistics is None:
205
+ new_value = new_value.T
206
+
207
+ kwargs = old_value.__dict__
208
+ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
209
+
210
+ module._parameters[tensor_name] = new_value
211
+ if fp16_statistics is not None:
212
+ setattr(module.weight, "SCB", fp16_statistics.to(target_device))
213
+ if unexpected_keys is not None:
214
+ unexpected_keys.remove(fp16_statistics_key)
215
+
216
+ # We just need to pop the `weight_format` keys from the state dict to avoid unneeded
217
+ # warnings. The correct format is retrieved during the first forward pass.
218
+ if fp16_weights_format is not None and unexpected_keys is not None:
219
+ unexpected_keys.remove(fp16_weights_format_key)
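On the bitsandbytes side, a rough sketch (based on the library's documented behaviour; a CUDA GPU is required) of the mechanism `create_quantized_param` relies on: wrapping a 16-bit weight in `bnb.nn.Int8Params` and moving it to CUDA performs the 8-bit quantization and exposes the statistics as `SCB`.

    import torch
    import bitsandbytes as bnb

    weight_fp16 = torch.randn(1024, 1024, dtype=torch.float16)
    int8_param = bnb.nn.Int8Params(weight_fp16, requires_grad=False, has_fp16_weights=False)
    int8_param = int8_param.to("cuda")  # quantization happens on this device transfer
    print(int8_param.dtype)      # torch.int8
    print(int8_param.SCB.shape)  # statistics stored alongside the int8 data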
220
+
221
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
222
+ model.is_loaded_in_8bit = True
223
+ model.is_8bit_serializable = self.is_serializable
224
+ return model
225
+
226
+ def _process_model_before_weight_loading(
227
+ self,
228
+ model: "PreTrainedModel",
229
+ device_map,
230
+ keep_in_fp32_modules: List[str] = [],
231
+ **kwargs,
232
+ ):
233
+ from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear
234
+
235
+ load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
236
+
237
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
238
+ if self.quantization_config.llm_int8_skip_modules is None:
239
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
240
+ else:
241
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
242
+
243
+ if not isinstance(self.modules_to_not_convert, list):
244
+ self.modules_to_not_convert = [self.modules_to_not_convert]
245
+
246
+ self.modules_to_not_convert.extend(keep_in_fp32_modules)
247
+
248
+ # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
249
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
250
+ keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
251
+
252
+ if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
253
+ raise ValueError(
254
+ "If you want to offload some keys to `cpu` or `disk`, you need to set "
255
+ "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
256
+ "converted to 8-bit but kept in 32-bit."
257
+ )
258
+ self.modules_to_not_convert.extend(keys_on_cpu)
259
+
260
+ model = replace_with_bnb_linear(
261
+ model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
262
+ )
263
+ # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here
264
+
265
+ model.config.quantization_config = self.quantization_config
266
+
267
+ @property
268
+ def is_serializable(self):
269
+ _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
270
+ "0.37.2"
271
+ )
272
+
273
+ if not _bnb_supports_8bit_serialization:
274
+ logger.warning(
275
+ "You are calling `save_pretrained` to an 8-bit converted model, but your `bitsandbytes` version doesn't support it. "
276
+ "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or"
277
+ " unexpected behaviours."
278
+ )
279
+ return False
280
+
281
+ return True
282
+
283
+ @property
284
+ def is_trainable(self) -> bool:
285
+ return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")
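Closing the loop on the 8-bit path: a hedged usage sketch combining loading with the serialization constraint enforced by `is_serializable` above (`bitsandbytes>0.37.2`); the checkpoint and output directory names are placeholders.

    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",  # placeholder checkpoint
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        device_map="auto",
    )
    model.save_pretrained("opt-350m-8bit")  # needs bitsandbytes>0.37.2 per is_serializable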