applied-ai-018 committed on
Commit 7a1062e · verified · 1 Parent(s): 42f831d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/tokenizers/__init__.py +100 -0
  2. env-llmeval/lib/python3.10/site-packages/tokenizers/__init__.pyi +1123 -0
  3. env-llmeval/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__init__.py +14 -0
  5. env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi +270 -0
  6. env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__init__.py +6 -0
  8. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py +418 -0
  16. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py +151 -0
  17. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py +122 -0
  18. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py +150 -0
  19. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_bpe.py +102 -0
  20. env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py +194 -0
  21. env-llmeval/lib/python3.10/site-packages/tokenizers/models/__init__.py +8 -0
  22. env-llmeval/lib/python3.10/site-packages/tokenizers/models/__init__.pyi +562 -0
  23. env-llmeval/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py +29 -0
  25. env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi +583 -0
  26. env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.py +15 -0
  28. env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi +593 -0
  29. env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__init__.py +9 -0
  31. env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi +337 -0
  32. env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__init__.py +1 -0
  34. env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css +170 -0
  37. env-llmeval/lib/python3.10/site-packages/tokenizers/tools/visualizer.py +403 -0
  38. env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__init__.py +8 -0
  39. env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi +158 -0
  40. env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torchgen/__init__.py +10 -0
  42. env-llmeval/lib/python3.10/site-packages/torchgen/api/lazy.py +464 -0
  43. env-llmeval/lib/python3.10/site-packages/torchgen/code_template.py +96 -0
  44. env-llmeval/lib/python3.10/site-packages/torchgen/context.py +128 -0
  45. env-llmeval/lib/python3.10/site-packages/torchgen/gen.py +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py +609 -0
  47. env-llmeval/lib/python3.10/site-packages/torchgen/gen_executorch.py +978 -0
  48. env-llmeval/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py +791 -0
  49. env-llmeval/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py +605 -0
  50. env-llmeval/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py +265 -0
env-llmeval/lib/python3.10/site-packages/tokenizers/__init__.py ADDED
@@ -0,0 +1,100 @@
1
+ from enum import Enum
2
+ from typing import List, Tuple, Union
3
+
4
+
5
+ Offsets = Tuple[int, int]
6
+
7
+ TextInputSequence = str
8
+ """A :obj:`str` that represents an input sequence """
9
+
10
+ PreTokenizedInputSequence = Union[List[str], Tuple[str]]
11
+ """A pre-tokenized input sequence. Can be one of:
12
+
13
+ - A :obj:`List` of :obj:`str`
14
+ - A :obj:`Tuple` of :obj:`str`
15
+ """
16
+
17
+ TextEncodeInput = Union[
18
+ TextInputSequence,
19
+ Tuple[TextInputSequence, TextInputSequence],
20
+ List[TextInputSequence],
21
+ ]
22
+ """Represents a textual input for encoding. Can be either:
23
+
24
+ - A single sequence: :data:`~tokenizers.TextInputSequence`
25
+ - A pair of sequences:
26
+
27
+ - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
28
+ - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
29
+ """
30
+
31
+ PreTokenizedEncodeInput = Union[
32
+ PreTokenizedInputSequence,
33
+ Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
34
+ List[PreTokenizedInputSequence],
35
+ ]
36
+ """Represents a pre-tokenized input for encoding. Can be either:
37
+
38
+ - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
39
+ - A pair of sequences:
40
+
41
+ - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
42
+ - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
43
+ """
44
+
45
+ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
46
+ """Represents all the possible types of input sequences for encoding. Can be:
47
+
48
+ - When ``is_pretokenized=False``: :data:`~TextInputSequence`
49
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
50
+ """
51
+
52
+ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
53
+ """Represents all the possible types of input for encoding. Can be:
54
+
55
+ - When ``is_pretokenized=False``: :data:`~TextEncodeInput`
56
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
57
+ """
58
+
59
+
60
+ class OffsetReferential(Enum):
61
+ ORIGINAL = "original"
62
+ NORMALIZED = "normalized"
63
+
64
+
65
+ class OffsetType(Enum):
66
+ BYTE = "byte"
67
+ CHAR = "char"
68
+
69
+
70
+ class SplitDelimiterBehavior(Enum):
71
+ REMOVED = "removed"
72
+ ISOLATED = "isolated"
73
+ MERGED_WITH_PREVIOUS = "merged_with_previous"
74
+ MERGED_WITH_NEXT = "merged_with_next"
75
+ CONTIGUOUS = "contiguous"
76
+
77
+
78
+ from .tokenizers import (
79
+ AddedToken,
80
+ Encoding,
81
+ NormalizedString,
82
+ PreTokenizedString,
83
+ Regex,
84
+ Token,
85
+ Tokenizer,
86
+ decoders,
87
+ models,
88
+ normalizers,
89
+ pre_tokenizers,
90
+ processors,
91
+ trainers,
92
+ __version__,
93
+ )
94
+ from .implementations import (
95
+ BertWordPieceTokenizer,
96
+ ByteLevelBPETokenizer,
97
+ CharBPETokenizer,
98
+ SentencePieceBPETokenizer,
99
+ SentencePieceUnigramTokenizer,
100
+ )
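The `__init__.py` above only declares input-type aliases, a few enums, and re-exports from the compiled `tokenizers.tokenizers` extension plus the pure-Python `implementations` package. As a rough, hedged sketch of how those exports fit together (the toy corpus and the `[UNK]` special token below are illustrative assumptions, not part of this commit):

from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Toy in-memory corpus, purely for illustration.
corpus = ["I saw a lion yesterday", "a tokenizer works as a pipeline"]

# Build a Tokenizer around a BPE model and a whitespace pre-tokenizer.
tokenizer = Tokenizer(models.BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# Train directly from the iterator; "[UNK]" is an assumed special token.
trainer = trainers.BpeTrainer(special_tokens=["[UNK]"])
tokenizer.train_from_iterator(corpus, trainer=trainer)

# A TextInputSequence (plain str) and a PreTokenizedInputSequence (list of str)
# are both accepted, selected via the is_pretokenized flag.
print(tokenizer.encode("I saw a lion").tokens)
print(tokenizer.encode(["I", "saw", "a", "lion"], is_pretokenized=True).tokens)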
env-llmeval/lib/python3.10/site-packages/tokenizers/__init__.pyi ADDED
@@ -0,0 +1,1123 @@
1
+ # Generated content DO NOT EDIT
2
+ class AddedToken:
3
+ """
4
+ Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
5
+ It can have special options that define the way it should behave.
6
+
7
+ Args:
8
+ content (:obj:`str`): The content of the token
9
+
10
+ single_word (:obj:`bool`, defaults to :obj:`False`):
11
+ Defines whether this token should only match single words. If :obj:`True`, this
12
+ token will never match inside of a word. For example the token ``ing`` would match
13
+ on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
14
+ The notion of "`inside of a word`" is defined by the word boundaries pattern in
15
+ regular expressions (ie. the token should start and end with word boundaries).
16
+
17
+ lstrip (:obj:`bool`, defaults to :obj:`False`):
18
+ Defines whether this token should strip all potential whitespaces on its left side.
19
+ If :obj:`True`, this token will greedily match any whitespace on its left. For
20
+ example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
21
+ ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
22
+
23
+ rstrip (:obj:`bool`, defaults to :obj:`False`):
24
+ Defines whether this token should strip all potential whitespaces on its right
25
+ side. If :obj:`True`, this token will greedily match any whitespace on its right.
26
+ It works just like :obj:`lstrip` but on the right.
27
+
28
+ normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
29
+ Defines whether this token should match against the normalized version of the input
30
+ text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
31
+ lowercasing the text, the token could be extracted from the input ``"I saw a lion
32
+ Yesterday"``.
33
+ special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
34
+ Defines whether this token should be skipped when decoding.
35
+
36
+ """
37
+
38
+ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
39
+ pass
40
+ @property
41
+ def content(self):
42
+ """
43
+ Get the content of this :obj:`AddedToken`
44
+ """
45
+ pass
46
+ @property
47
+ def lstrip(self):
48
+ """
49
+ Get the value of the :obj:`lstrip` option
50
+ """
51
+ pass
52
+ @property
53
+ def normalized(self):
54
+ """
55
+ Get the value of the :obj:`normalized` option
56
+ """
57
+ pass
58
+ @property
59
+ def rstrip(self):
60
+ """
61
+ Get the value of the :obj:`rstrip` option
62
+ """
63
+ pass
64
+ @property
65
+ def single_word(self):
66
+ """
67
+ Get the value of the :obj:`single_word` option
68
+ """
69
+ pass
70
+ @property
71
+ def special(self):
72
+ """
73
+ Get the value of the :obj:`special` option
74
+ """
75
+ pass
76
+
77
+ class Encoding:
78
+ """
79
+ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
80
+ """
81
+
82
+ @property
83
+ def attention_mask(self):
84
+ """
85
+ The attention mask
86
+
87
+ This indicates to the LM which tokens should be attended to, and which should not.
88
+ This is especially important when batching sequences, where we need to apply
89
+ padding.
90
+
91
+ Returns:
92
+ :obj:`List[int]`: The attention mask
93
+ """
94
+ pass
95
+ def char_to_token(self, char_pos, sequence_index=0):
96
+ """
97
+ Get the token that contains the char at the given position in the input sequence.
98
+
99
+ Args:
100
+ char_pos (:obj:`int`):
101
+ The position of a char in the input string
102
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
103
+ The index of the sequence that contains the target char
104
+
105
+ Returns:
106
+ :obj:`int`: The index of the token that contains this char in the encoded sequence
107
+ """
108
+ pass
109
+ def char_to_word(self, char_pos, sequence_index=0):
110
+ """
111
+ Get the word that contains the char at the given position in the input sequence.
112
+
113
+ Args:
114
+ char_pos (:obj:`int`):
115
+ The position of a char in the input string
116
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
117
+ The index of the sequence that contains the target char
118
+
119
+ Returns:
120
+ :obj:`int`: The index of the word that contains this char in the input sequence
121
+ """
122
+ pass
123
+ @property
124
+ def ids(self):
125
+ """
126
+ The generated IDs
127
+
128
+ The IDs are the main input to a Language Model. They are the token indices,
129
+ the numerical representations that a LM understands.
130
+
131
+ Returns:
132
+ :obj:`List[int]`: The list of IDs
133
+ """
134
+ pass
135
+ @staticmethod
136
+ def merge(encodings, growing_offsets=True):
137
+ """
138
+ Merge the list of encodings into one final :class:`~tokenizers.Encoding`
139
+
140
+ Args:
141
+ encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
142
+ The list of encodings that should be merged in one
143
+
144
+ growing_offsets (:obj:`bool`, defaults to :obj:`True`):
145
+ Whether the offsets should accumulate while merging
146
+
147
+ Returns:
148
+ :class:`~tokenizers.Encoding`: The resulting Encoding
149
+ """
150
+ pass
151
+ @property
152
+ def n_sequences(self):
153
+ """
154
+ The number of sequences represented
155
+
156
+ Returns:
157
+ :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
158
+ """
159
+ pass
160
+ @property
161
+ def offsets(self):
162
+ """
163
+ The offsets associated to each token
164
+
165
+ These offsets let you slice the input string, and thus retrieve the original
166
+ part that led to producing the corresponding token.
167
+
168
+ Returns:
169
+ A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
170
+ """
171
+ pass
172
+ @property
173
+ def overflowing(self):
174
+ """
175
+ A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
176
+
177
+ When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
178
+ the output into as many pieces as required to match the specified maximum length.
179
+ This field lets you retrieve all the subsequent pieces.
180
+
181
+ When you use pairs of sequences, the overflowing pieces will contain enough
182
+ variations to cover all the possible combinations, while respecting the provided
183
+ maximum length.
184
+ """
185
+ pass
186
+ def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
187
+ """
188
+ Pad the :class:`~tokenizers.Encoding` at the given length
189
+
190
+ Args:
191
+ length (:obj:`int`):
192
+ The desired length
193
+
194
+ direction: (:obj:`str`, defaults to :obj:`right`):
195
+ The expected padding direction. Can be either :obj:`right` or :obj:`left`
196
+
197
+ pad_id (:obj:`int`, defaults to :obj:`0`):
198
+ The ID corresponding to the padding token
199
+
200
+ pad_type_id (:obj:`int`, defaults to :obj:`0`):
201
+ The type ID corresponding to the padding token
202
+
203
+ pad_token (:obj:`str`, defaults to `[PAD]`):
204
+ The pad token to use
205
+ """
206
+ pass
207
+ @property
208
+ def sequence_ids(self):
209
+ """
210
+ The generated sequence indices.
211
+
212
+ They represent the index of the input sequence associated to each token.
213
+ The sequence id can be None if the token is not related to any input sequence,
214
+ like for example with special tokens.
215
+
216
+ Returns:
217
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
218
+ """
219
+ pass
220
+ def set_sequence_id(self, sequence_id):
221
+ """
222
+ Set the given sequence index
223
+
224
+ Set the given sequence index for the whole range of tokens contained in this
225
+ :class:`~tokenizers.Encoding`.
226
+ """
227
+ pass
228
+ @property
229
+ def special_tokens_mask(self):
230
+ """
231
+ The special token mask
232
+
233
+ This indicates which tokens are special tokens, and which are not.
234
+
235
+ Returns:
236
+ :obj:`List[int]`: The special tokens mask
237
+ """
238
+ pass
239
+ def token_to_chars(self, token_index):
240
+ """
241
+ Get the offsets of the token at the given index.
242
+
243
+ The returned offsets are related to the input sequence that contains the
244
+ token. In order to determine in which input sequence it belongs, you
245
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
246
+
247
+ Args:
248
+ token_index (:obj:`int`):
249
+ The index of a token in the encoded sequence.
250
+
251
+ Returns:
252
+ :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
253
+ """
254
+ pass
255
+ def token_to_sequence(self, token_index):
256
+ """
257
+ Get the index of the sequence represented by the given token.
258
+
259
+ In the general use case, this method returns :obj:`0` for a single sequence or
260
+ the first sequence of a pair, and :obj:`1` for the second sequence of a pair
261
+
262
+ Args:
263
+ token_index (:obj:`int`):
264
+ The index of a token in the encoded sequence.
265
+
266
+ Returns:
267
+ :obj:`int`: The sequence id of the given token
268
+ """
269
+ pass
270
+ def token_to_word(self, token_index):
271
+ """
272
+ Get the index of the word that contains the token in one of the input sequences.
273
+
274
+ The returned word index is related to the input sequence that contains
275
+ the token. In order to determine in which input sequence it belongs, you
276
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
277
+
278
+ Args:
279
+ token_index (:obj:`int`):
280
+ The index of a token in the encoded sequence.
281
+
282
+ Returns:
283
+ :obj:`int`: The index of the word in the relevant input sequence.
284
+ """
285
+ pass
286
+ @property
287
+ def tokens(self):
288
+ """
289
+ The generated tokens
290
+
291
+ They are the string representation of the IDs.
292
+
293
+ Returns:
294
+ :obj:`List[str]`: The list of tokens
295
+ """
296
+ pass
297
+ def truncate(self, max_length, stride=0, direction="right"):
298
+ """
299
+ Truncate the :class:`~tokenizers.Encoding` at the given length
300
+
301
+ If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
302
+ this information is lost. It will be considered as representing a single sequence.
303
+
304
+ Args:
305
+ max_length (:obj:`int`):
306
+ The desired length
307
+
308
+ stride (:obj:`int`, defaults to :obj:`0`):
309
+ The length of previous content to be included in each overflowing piece
310
+
311
+ direction (:obj:`str`, defaults to :obj:`right`):
312
+ Truncate direction
313
+ """
314
+ pass
315
+ @property
316
+ def type_ids(self):
317
+ """
318
+ The generated type IDs
319
+
320
+ Generally used for tasks like sequence classification or question answering,
321
+ these tokens let the LM know which input sequence corresponds to each token.
322
+
323
+ Returns:
324
+ :obj:`List[int]`: The list of type ids
325
+ """
326
+ pass
327
+ @property
328
+ def word_ids(self):
329
+ """
330
+ The generated word indices.
331
+
332
+ They represent the index of the word associated to each token.
333
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
334
+ otherwise they correspond to the word indices as defined by the
335
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
336
+
337
+ For special tokens and such (any token that was generated from something that was
338
+ not part of the input), the output is :obj:`None`
339
+
340
+ Returns:
341
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
342
+ """
343
+ pass
344
+ def word_to_chars(self, word_index, sequence_index=0):
345
+ """
346
+ Get the offsets of the word at the given index in one of the input sequences.
347
+
348
+ Args:
349
+ word_index (:obj:`int`):
350
+ The index of a word in one of the input sequences.
351
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
352
+ The index of the sequence that contains the target word
353
+
354
+ Returns:
355
+ :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
356
+ """
357
+ pass
358
+ def word_to_tokens(self, word_index, sequence_index=0):
359
+ """
360
+ Get the encoded tokens corresponding to the word at the given index
361
+ in one of the input sequences.
362
+
363
+ Args:
364
+ word_index (:obj:`int`):
365
+ The index of a word in one of the input sequences.
366
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
367
+ The index of the sequence that contains the target word
368
+
369
+ Returns:
370
+ :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
371
+ """
372
+ pass
373
+ @property
374
+ def words(self):
375
+ """
376
+ The generated word indices.
377
+
378
+ .. warning::
379
+ This is deprecated and will be removed in a future version.
380
+ Please use :obj:`~tokenizers.Encoding.word_ids` instead.
381
+
382
+ They represent the index of the word associated to each token.
383
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
384
+ otherwise they correspond to the word indices as defined by the
385
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
386
+
387
+ For special tokens and such (any token that was generated from something that was
388
+ not part of the input), the output is :obj:`None`
389
+
390
+ Returns:
391
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
392
+ """
393
+ pass
394
+
395
+ class NormalizedString:
396
+ """
397
+ NormalizedString
398
+
399
+ A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
400
+ While making all the requested modifications, it keeps track of the alignment information
401
+ between the two versions of the string.
402
+
403
+ Args:
404
+ sequence: str:
405
+ The string sequence used to initialize this NormalizedString
406
+ """
407
+
408
+ def append(self, s):
409
+ """
410
+ Append the given sequence to the string
411
+ """
412
+ pass
413
+ def clear(self):
414
+ """
415
+ Clears the string
416
+ """
417
+ pass
418
+ def filter(self, func):
419
+ """
420
+ Filter each character of the string using the given func
421
+ """
422
+ pass
423
+ def for_each(self, func):
424
+ """
425
+ Calls the given function for each character of the string
426
+ """
427
+ pass
428
+ def lowercase(self):
429
+ """
430
+ Lowercase the string
431
+ """
432
+ pass
433
+ def lstrip(self):
434
+ """
435
+ Strip the left of the string
436
+ """
437
+ pass
438
+ def map(self, func):
439
+ """
440
+ Calls the given function for each character of the string
441
+
442
+ Replaces each character of the string using the returned value. Each
443
+ returned value **must** be a str of length 1 (ie a character).
444
+ """
445
+ pass
446
+ def nfc(self):
447
+ """
448
+ Runs the NFC normalization
449
+ """
450
+ pass
451
+ def nfd(self):
452
+ """
453
+ Runs the NFD normalization
454
+ """
455
+ pass
456
+ def nfkc(self):
457
+ """
458
+ Runs the NFKC normalization
459
+ """
460
+ pass
461
+ def nfkd(self):
462
+ """
463
+ Runs the NFKD normalization
464
+ """
465
+ pass
466
+ @property
467
+ def normalized(self):
468
+ """
469
+ The normalized part of the string
470
+ """
471
+ pass
472
+ def prepend(self, s):
473
+ """
474
+ Prepend the given sequence to the string
475
+ """
476
+ pass
477
+ def replace(self, pattern, content):
478
+ """
479
+ Replace the content of the given pattern with the provided content
480
+
481
+ Args:
482
+ pattern: Pattern:
483
+ A pattern used to match the string. Usually a string or a Regex
484
+
485
+ content: str:
486
+ The content to be used as replacement
487
+ """
488
+ pass
489
+ def rstrip(self):
490
+ """
491
+ Strip the right of the string
492
+ """
493
+ pass
494
+ def slice(self, range):
495
+ """
496
+ Slice the string using the given range
497
+ """
498
+ pass
499
+ def split(self, pattern, behavior):
500
+ """
501
+ Split the NormalizedString using the given pattern and the specified behavior
502
+
503
+ Args:
504
+ pattern: Pattern:
505
+ A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
506
+
507
+ behavior: SplitDelimiterBehavior:
508
+ The behavior to use when splitting.
509
+ Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
510
+ "contiguous"
511
+
512
+ Returns:
513
+ A list of NormalizedString, representing each split
514
+ """
515
+ pass
516
+ def strip(self):
517
+ """
518
+ Strip both ends of the string
519
+ """
520
+ pass
521
+ def uppercase(self):
522
+ """
523
+ Uppercase the string
524
+ """
525
+ pass
526
+
527
+ class PreTokenizedString:
528
+ """
529
+ PreTokenizedString
530
+
531
+ Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
532
+ underlying string, while keeping track of the alignment information (offsets).
533
+
534
+ The PreTokenizedString manages what we call `splits`. Each split represents a substring
535
+ which is a subpart of the original string, with the relevant offsets and tokens.
536
+
537
+ When calling one of the methods used to modify the PreTokenizedString (namely one of
538
+ `split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
539
+ tokens will get modified.
540
+
541
+ Args:
542
+ sequence: str:
543
+ The string sequence used to initialize this PreTokenizedString
544
+ """
545
+
546
+ def __init__(self, sequence):
547
+ pass
548
+ def get_splits(self, offset_referential="original", offset_type="char"):
549
+ """
550
+ Get the splits currently managed by the PreTokenizedString
551
+
552
+ Args:
553
+ offset_referential: :obj:`str`
554
+ Whether the returned splits should have offsets expressed relative
555
+ to the original string, or the normalized one. choices: "original", "normalized".
556
+
557
+ offset_type: :obj:`str`
558
+ Whether the returned splits should have offsets expressed in bytes or chars.
559
+ When slicing an str, we usually want to use chars, which is the default value.
560
+ Now in some cases it might be interesting to get these offsets expressed in bytes,
561
+ so it is possible to change this here.
562
+ choices: "char", "bytes"
563
+
564
+ Returns
565
+ A list of splits
566
+ """
567
+ pass
568
+ def normalize(self, func):
569
+ """
570
+ Normalize each split of the `PreTokenizedString` using the given `func`
571
+
572
+ Args:
573
+ func: Callable[[NormalizedString], None]:
574
+ The function used to normalize each underlying split. This function
575
+ does not need to return anything, just calling the methods on the provided
576
+ NormalizedString allows its modification.
577
+ """
578
+ pass
579
+ def split(self, func):
580
+ """
581
+ Split the PreTokenizedString using the given `func`
582
+
583
+ Args:
584
+ func: Callable[[index, NormalizedString], List[NormalizedString]]:
585
+ The function used to split each underlying split.
586
+ It is expected to return a list of `NormalizedString`, that represent the new
587
+ splits. If the given `NormalizedString` does not need any splitting, we can
588
+ just return it directly.
589
+ In order for the offsets to be tracked accurately, any returned `NormalizedString`
590
+ should come from calling either `.split` or `.slice` on the received one.
591
+ """
592
+ pass
593
+ def to_encoding(self, type_id=0, word_idx=None):
594
+ """
595
+ Return an Encoding generated from this PreTokenizedString
596
+
597
+ Args:
598
+ type_id: int = 0:
599
+ The type_id to be used on the generated Encoding.
600
+
601
+ word_idx: Optional[int] = None:
602
+ An optional word index to be used for each token of this Encoding. If provided,
603
+ all the word indices in the generated Encoding will use this value, instead
604
+ of the one automatically tracked during pre-tokenization.
605
+
606
+ Returns:
607
+ An Encoding
608
+ """
609
+ pass
610
+ def tokenize(self, func):
611
+ """
612
+ Tokenize each split of the `PreTokenizedString` using the given `func`
613
+
614
+ Args:
615
+ func: Callable[[str], List[Token]]:
616
+ The function used to tokenize each underlying split. This function must return
617
+ a list of Token generated from the input str.
618
+ """
619
+ pass
620
+
621
+ class Regex:
622
+ """
623
+ Instantiate a new Regex with the given pattern
624
+ """
625
+
626
+ def __init__(self, pattern):
627
+ pass
628
+
629
+ class Token:
630
+ pass
631
+
632
+ class Tokenizer:
633
+ """
634
+ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
635
+ and outputs an :class:`~tokenizers.Encoding`.
636
+
637
+ Args:
638
+ model (:class:`~tokenizers.models.Model`):
639
+ The core algorithm that this :obj:`Tokenizer` should be using.
640
+
641
+ """
642
+
643
+ def __init__(self, model):
644
+ pass
645
+ def add_special_tokens(self, tokens):
646
+ """
647
+ Add the given special tokens to the Tokenizer.
648
+
649
+ If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
650
+ them. If they don't exist, the Tokenizer creates them, giving them a new id.
651
+
652
+ These special tokens will never be processed by the model (ie won't be split into
653
+ multiple tokens), and they can be removed from the output when decoding.
654
+
655
+ Args:
656
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
657
+ The list of special tokens we want to add to the vocabulary. Each token can either
658
+ be a string or an instance of :class:`~tokenizers.AddedToken` for more
659
+ customization.
660
+
661
+ Returns:
662
+ :obj:`int`: The number of tokens that were created in the vocabulary
663
+ """
664
+ pass
665
+ def add_tokens(self, tokens):
666
+ """
667
+ Add the given tokens to the vocabulary
668
+
669
+ The given tokens are added only if they don't already exist in the vocabulary.
670
+ Each token is then assigned a new id.
671
+
672
+ Args:
673
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
674
+ The list of tokens we want to add to the vocabulary. Each token can be either a
675
+ string or an instance of :class:`~tokenizers.AddedToken` for more customization.
676
+
677
+ Returns:
678
+ :obj:`int`: The number of tokens that were created in the vocabulary
679
+ """
680
+ pass
681
+ def decode(self, ids, skip_special_tokens=True):
682
+ """
683
+ Decode the given list of ids back to a string
684
+
685
+ This is used to decode anything coming back from a Language Model
686
+
687
+ Args:
688
+ ids (A :obj:`List/Tuple` of :obj:`int`):
689
+ The list of ids that we want to decode
690
+
691
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
692
+ Whether the special tokens should be removed from the decoded string
693
+
694
+ Returns:
695
+ :obj:`str`: The decoded string
696
+ """
697
+ pass
698
+ def decode_batch(self, sequences, skip_special_tokens=True):
699
+ """
700
+ Decode a batch of ids back to their corresponding string
701
+
702
+ Args:
703
+ sequences (:obj:`List` of :obj:`List[int]`):
704
+ The batch of sequences we want to decode
705
+
706
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
707
+ Whether the special tokens should be removed from the decoded strings
708
+
709
+ Returns:
710
+ :obj:`List[str]`: A list of decoded strings
711
+ """
712
+ pass
713
+ @property
714
+ def decoder(self):
715
+ """
716
+ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
717
+ """
718
+ pass
719
+ def enable_padding(
720
+ self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
721
+ ):
722
+ """
723
+ Enable the padding
724
+
725
+ Args:
726
+ direction (:obj:`str`, `optional`, defaults to :obj:`right`):
727
+ The direction in which to pad. Can be either ``right`` or ``left``
728
+
729
+ pad_to_multiple_of (:obj:`int`, `optional`):
730
+ If specified, the padding length should always snap to the next multiple of the
731
+ given value. For example, if we were going to pad with a length of 250 but
732
+ ``pad_to_multiple_of=8`` then we will pad to 256.
733
+
734
+ pad_id (:obj:`int`, defaults to 0):
735
+ The id to be used when padding
736
+
737
+ pad_type_id (:obj:`int`, defaults to 0):
738
+ The type id to be used when padding
739
+
740
+ pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
741
+ The pad token to be used when padding
742
+
743
+ length (:obj:`int`, `optional`):
744
+ If specified, the length at which to pad. If not specified we pad using the size of
745
+ the longest sequence in a batch.
746
+ """
747
+ pass
748
+ def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
749
+ """
750
+ Enable truncation
751
+
752
+ Args:
753
+ max_length (:obj:`int`):
754
+ The max length at which to truncate
755
+
756
+ stride (:obj:`int`, `optional`):
757
+ The length of the previous first sequence to be included in the overflowing
758
+ sequence
759
+
760
+ strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
761
+ The truncation strategy to use. Can be one of ``longest_first``, ``only_first`` or
762
+ ``only_second``.
763
+
764
+ direction (:obj:`str`, defaults to :obj:`right`):
765
+ Truncate direction
766
+ """
767
+ pass
768
+ def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
769
+ """
770
+ Encode the given sequence and pair. This method can process raw text sequences
771
+ as well as already pre-tokenized sequences.
772
+
773
+ Example:
774
+ Here are some examples of the inputs that are accepted::
775
+
776
+ encode("A single sequence")`
777
+ encode("A sequence", "And its pair")`
778
+ encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
779
+ encode(
780
+ [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
781
+ is_pretokenized=True
782
+ )
783
+
784
+ Args:
785
+ sequence (:obj:`~tokenizers.InputSequence`):
786
+ The main input sequence we want to encode. This sequence can be either raw
787
+ text or pre-tokenized, according to the ``is_pretokenized`` argument:
788
+
789
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
790
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
791
+
792
+ pair (:obj:`~tokenizers.InputSequence`, `optional`):
793
+ An optional input sequence. The expected format is the same as for ``sequence``.
794
+
795
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
796
+ Whether the input is already pre-tokenized
797
+
798
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
799
+ Whether to add the special tokens
800
+
801
+ Returns:
802
+ :class:`~tokenizers.Encoding`: The encoded result
803
+
804
+ """
805
+ pass
806
+ def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
807
+ """
808
+ Encode the given batch of inputs. This method accepts both raw text sequences
809
+ as well as already pre-tokenized sequences.
810
+
811
+ Example:
812
+ Here are some examples of the inputs that are accepted::
813
+
814
+ encode_batch([
815
+ "A single sequence",
816
+ ("A tuple with a sequence", "And its pair"),
817
+ [ "A", "pre", "tokenized", "sequence" ],
818
+ ([ "A", "pre", "tokenized", "sequence" ], "And its pair")
819
+ ])
820
+
821
+ Args:
822
+ input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
823
+ A list of single sequences or pair sequences to encode. Each sequence
824
+ can be either raw text or pre-tokenized, according to the ``is_pretokenized``
825
+ argument:
826
+
827
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
828
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
829
+
830
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
831
+ Whether the input is already pre-tokenized
832
+
833
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
834
+ Whether to add the special tokens
835
+
836
+ Returns:
837
+ A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
838
+
839
+ """
840
+ pass
841
+ @property
842
+ def encode_special_tokens(self):
843
+ """
844
+ Modifies whether the tokenizer should use special tokens
845
+ during encoding.
846
+
847
+ Args:
848
+ value (:obj:`bool`):
849
+ Whether to use the special tokens or not
850
+
851
+ """
852
+ pass
853
+ @staticmethod
854
+ def from_buffer(buffer):
855
+ """
856
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
857
+
858
+ Args:
859
+ buffer (:obj:`bytes`):
860
+ A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
861
+
862
+ Returns:
863
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
864
+ """
865
+ pass
866
+ @staticmethod
867
+ def from_file(path):
868
+ """
869
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
870
+
871
+ Args:
872
+ path (:obj:`str`):
873
+ A path to a local JSON file representing a previously serialized
874
+ :class:`~tokenizers.Tokenizer`
875
+
876
+ Returns:
877
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
878
+ """
879
+ pass
880
+ @staticmethod
881
+ def from_pretrained(identifier, revision="main", auth_token=None):
882
+ """
883
+ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
884
+ Hugging Face Hub.
885
+
886
+ Args:
887
+ identifier (:obj:`str`):
888
+ The identifier of a Model on the Hugging Face Hub, that contains
889
+ a tokenizer.json file
890
+ revision (:obj:`str`, defaults to `main`):
891
+ A branch or commit id
892
+ auth_token (:obj:`str`, `optional`, defaults to `None`):
893
+ An optional auth token used to access private repositories on the
894
+ Hugging Face Hub
895
+
896
+ Returns:
897
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
898
+ """
899
+ pass
900
+ @staticmethod
901
+ def from_str(json):
902
+ """
903
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
904
+
905
+ Args:
906
+ json (:obj:`str`):
907
+ A valid JSON string representing a previously serialized
908
+ :class:`~tokenizers.Tokenizer`
909
+
910
+ Returns:
911
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
912
+ """
913
+ pass
914
+ def get_added_tokens_decoder(self):
915
+ """
916
+ Get the underlying vocabulary
917
+
918
+ Returns:
919
+ :obj:`Dict[int, AddedToken]`: The vocabulary
920
+ """
921
+ pass
922
+ def get_vocab(self, with_added_tokens=True):
923
+ """
924
+ Get the underlying vocabulary
925
+
926
+ Args:
927
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
928
+ Whether to include the added tokens
929
+
930
+ Returns:
931
+ :obj:`Dict[str, int]`: The vocabulary
932
+ """
933
+ pass
934
+ def get_vocab_size(self, with_added_tokens=True):
935
+ """
936
+ Get the size of the underlying vocabulary
937
+
938
+ Args:
939
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
940
+ Whether to include the added tokens
941
+
942
+ Returns:
943
+ :obj:`int`: The size of the vocabulary
944
+ """
945
+ pass
946
+ def id_to_token(self, id):
947
+ """
948
+ Convert the given id to its corresponding token if it exists
949
+
950
+ Args:
951
+ id (:obj:`int`):
952
+ The id to convert
953
+
954
+ Returns:
955
+ :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
956
+ """
957
+ pass
958
+ @property
959
+ def model(self):
960
+ """
961
+ The :class:`~tokenizers.models.Model` in use by the Tokenizer
962
+ """
963
+ pass
964
+ def no_padding(self):
965
+ """
966
+ Disable padding
967
+ """
968
+ pass
969
+ def no_truncation(self):
970
+ """
971
+ Disable truncation
972
+ """
973
+ pass
974
+ @property
975
+ def normalizer(self):
976
+ """
977
+ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
978
+ """
979
+ pass
980
+ def num_special_tokens_to_add(self, is_pair):
981
+ """
982
+ Return the number of special tokens that would be added for single/pair sentences.
983
+ :param is_pair: Boolean indicating if the input would be a single sentence or a pair
984
+ :return:
985
+ """
986
+ pass
987
+ @property
988
+ def padding(self):
989
+ """
990
+ Get the current padding parameters
991
+
992
+ `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
993
+
994
+ Returns:
995
+ (:obj:`dict`, `optional`):
996
+ A dict with the current padding parameters if padding is enabled
997
+ """
998
+ pass
999
+ def post_process(self, encoding, pair=None, add_special_tokens=True):
1000
+ """
1001
+ Apply all the post-processing steps to the given encodings.
1002
+
1003
+ The various steps are:
1004
+
1005
+ 1. Truncate according to the set truncation params (provided with
1006
+ :meth:`~tokenizers.Tokenizer.enable_truncation`)
1007
+ 2. Apply the :class:`~tokenizers.processors.PostProcessor`
1008
+ 3. Pad according to the set padding params (provided with
1009
+ :meth:`~tokenizers.Tokenizer.enable_padding`)
1010
+
1011
+ Args:
1012
+ encoding (:class:`~tokenizers.Encoding`):
1013
+ The :class:`~tokenizers.Encoding` corresponding to the main sequence.
1014
+
1015
+ pair (:class:`~tokenizers.Encoding`, `optional`):
1016
+ An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
1017
+
1018
+ add_special_tokens (:obj:`bool`):
1019
+ Whether to add the special tokens
1020
+
1021
+ Returns:
1022
+ :class:`~tokenizers.Encoding`: The final post-processed encoding
1023
+ """
1024
+ pass
1025
+ @property
1026
+ def post_processor(self):
1027
+ """
1028
+ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
1029
+ """
1030
+ pass
1031
+ @property
1032
+ def pre_tokenizer(self):
1033
+ """
1034
+ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
1035
+ """
1036
+ pass
1037
+ def save(self, path, pretty=True):
1038
+ """
1039
+ Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
1040
+
1041
+ Args:
1042
+ path (:obj:`str`):
1043
+ A path to a file in which to save the serialized tokenizer.
1044
+
1045
+ pretty (:obj:`bool`, defaults to :obj:`True`):
1046
+ Whether the JSON file should be pretty formatted.
1047
+ """
1048
+ pass
1049
+ def to_str(self, pretty=False):
1050
+ """
1051
+ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
1052
+
1053
+ Args:
1054
+ pretty (:obj:`bool`, defaults to :obj:`False`):
1055
+ Whether the JSON string should be pretty formatted.
1056
+
1057
+ Returns:
1058
+ :obj:`str`: A string representing the serialized Tokenizer
1059
+ """
1060
+ pass
1061
+ def token_to_id(self, token):
1062
+ """
1063
+ Convert the given token to its corresponding id if it exists
1064
+
1065
+ Args:
1066
+ token (:obj:`str`):
1067
+ The token to convert
1068
+
1069
+ Returns:
1070
+ :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
1071
+ """
1072
+ pass
1073
+ def train(self, files, trainer=None):
1074
+ """
1075
+ Train the Tokenizer using the given files.
1076
+
1077
+ Reads the files line by line, while keeping all the whitespace, even new lines.
1078
+ If you want to train from data stored in memory, you can check
1079
+ :meth:`~tokenizers.Tokenizer.train_from_iterator`
1080
+
1081
+ Args:
1082
+ files (:obj:`List[str]`):
1083
+ A list of paths to the files that we should use for training
1084
+
1085
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
1086
+ An optional trainer that should be used to train our Model
1087
+ """
1088
+ pass
1089
+ def train_from_iterator(self, iterator, trainer=None, length=None):
1090
+ """
1091
+ Train the Tokenizer using the provided iterator.
1092
+
1093
+ You can provide anything that is a Python Iterator
1094
+
1095
+ * A list of sequences :obj:`List[str]`
1096
+ * A generator that yields :obj:`str` or :obj:`List[str]`
1097
+ * A Numpy array of strings
1098
+ * ...
1099
+
1100
+ Args:
1101
+ iterator (:obj:`Iterator`):
1102
+ Any iterator over strings or list of strings
1103
+
1104
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
1105
+ An optional trainer that should be used to train our Model
1106
+
1107
+ length (:obj:`int`, `optional`):
1108
+ The total number of sequences in the iterator. This is used to
1109
+ provide meaningful progress tracking
1110
+ """
1111
+ pass
1112
+ @property
1113
+ def truncation(self):
1114
+ """
1115
+ Get the currently set truncation parameters
1116
+
1117
+ `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
1118
+
1119
+ Returns:
1120
+ (:obj:`dict`, `optional`):
1121
+ A dict with the current truncation parameters if truncation is enabled
1122
+ """
1123
+ pass
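The stub above documents the full `Tokenizer` API surface. The sketch below is one plausible way the padding, truncation, batch-encoding and decoding methods documented there combine in practice; the `tokenizer.json` path and the numeric settings are placeholder assumptions:

from tokenizers import Tokenizer

# Placeholder path to a previously serialized tokenizer.
tokenizer = Tokenizer.from_file("tokenizer.json")

# Configure truncation and padding as documented in the stub above.
tokenizer.enable_truncation(max_length=16, strategy="longest_first")
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", pad_to_multiple_of=8)

# encode_batch accepts single sequences and (sequence, pair) tuples.
encodings = tokenizer.encode_batch([
    "A single sequence",
    ("A tuple with a sequence", "And its pair"),
])
for enc in encodings:
    print(enc.tokens, enc.attention_mask)

# Round-trip the ids of the first encoding back to text.
print(tokenizer.decode(encodings[0].ids))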
env-llmeval/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.79 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ from .. import decoders
2
+
3
+
4
+ Decoder = decoders.Decoder
5
+ ByteLevel = decoders.ByteLevel
6
+ Replace = decoders.Replace
7
+ WordPiece = decoders.WordPiece
8
+ ByteFallback = decoders.ByteFallback
9
+ Fuse = decoders.Fuse
10
+ Strip = decoders.Strip
11
+ Metaspace = decoders.Metaspace
12
+ BPEDecoder = decoders.BPEDecoder
13
+ CTC = decoders.CTC
14
+ Sequence = decoders.Sequence
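The module above simply aliases the Rust-backed decoder classes. A minimal sketch of attaching one of them to a tokenizer follows; the `bert-tokenizer.json` file is a placeholder assumption, and the `##` prefix shown is the documented default:

from tokenizers import Tokenizer, decoders

# Placeholder path to a WordPiece-based tokenizer.
tokenizer = Tokenizer.from_file("bert-tokenizer.json")

# Merge "##" continuation pieces back into whole words when decoding.
tokenizer.decoder = decoders.WordPiece(prefix="##", cleanup=True)

enc = tokenizer.encode("unaffable")
print(enc.tokens)                 # e.g. ['un', '##aff', '##able'] with a suitable vocab
print(tokenizer.decode(enc.ids))  # expected: "unaffable"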
env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi ADDED
@@ -0,0 +1,270 @@
1
+ # Generated content DO NOT EDIT
2
+ class Decoder:
3
+ """
4
+ Base class for all decoders
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of
7
+ a Decoder will return an instance of this class when instantiated.
8
+ """
9
+
10
+ def decode(self, tokens):
11
+ """
12
+ Decode the given list of tokens to a final string
13
+
14
+ Args:
15
+ tokens (:obj:`List[str]`):
16
+ The list of tokens to decode
17
+
18
+ Returns:
19
+ :obj:`str`: The decoded string
20
+ """
21
+ pass
22
+
23
+ class BPEDecoder(Decoder):
24
+ """
25
+ BPEDecoder Decoder
26
+
27
+ Args:
28
+ suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
29
+ The suffix that was used to characterize an end-of-word. This suffix will
30
+ be replaced by whitespaces during the decoding
31
+ """
32
+
33
+ def __init__(self, suffix="</w>"):
34
+ pass
35
+ def decode(self, tokens):
36
+ """
37
+ Decode the given list of tokens to a final string
38
+
39
+ Args:
40
+ tokens (:obj:`List[str]`):
41
+ The list of tokens to decode
42
+
43
+ Returns:
44
+ :obj:`str`: The decoded string
45
+ """
46
+ pass
47
+
48
+ class ByteFallback(Decoder):
49
+ """
50
+ ByteFallback Decoder
51
+ ByteFallback is a simple trick which converts tokens looking like `<0x61>`
52
+ to pure bytes, and attempts to make them into a string. If the tokens
53
+ cannot be decoded, you will get � instead for each inconvertible byte token
54
+
55
+ """
56
+
57
+ def __init__(self):
58
+ pass
59
+ def decode(self, tokens):
60
+ """
61
+ Decode the given list of tokens to a final string
62
+
63
+ Args:
64
+ tokens (:obj:`List[str]`):
65
+ The list of tokens to decode
66
+
67
+ Returns:
68
+ :obj:`str`: The decoded string
69
+ """
70
+ pass
71
+
72
+ class ByteLevel(Decoder):
73
+ """
74
+ ByteLevel Decoder
75
+
76
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
77
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
78
+ """
79
+
80
+ def __init__(self):
81
+ pass
82
+ def decode(self, tokens):
83
+ """
84
+ Decode the given list of tokens to a final string
85
+
86
+ Args:
87
+ tokens (:obj:`List[str]`):
88
+ The list of tokens to decode
89
+
90
+ Returns:
91
+ :obj:`str`: The decoded string
92
+ """
93
+ pass
94
+
95
+ class CTC(Decoder):
96
+ """
97
+ CTC Decoder
98
+
99
+ Args:
100
+ pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
101
+ The pad token used by CTC to delimit a new token.
102
+ word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
103
+ The word delimiter token. It will be replaced by a <space>
104
+ cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
105
+ Whether to cleanup some tokenization artifacts.
106
+ Mainly spaces before punctuation, and some abbreviated English forms.
107
+ """
108
+
109
+ def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
110
+ pass
111
+ def decode(self, tokens):
112
+ """
113
+ Decode the given list of tokens to a final string
114
+
115
+ Args:
116
+ tokens (:obj:`List[str]`):
117
+ The list of tokens to decode
118
+
119
+ Returns:
120
+ :obj:`str`: The decoded string
121
+ """
122
+ pass
123
+
124
+ class Fuse(Decoder):
125
+ """
126
+ Fuse Decoder
127
+ Fuse simply fuses every token into a single string.
128
+ This is the last step of decoding; this decoder exists only if
129
+ there is a need to add other decoders *after* the fusion
130
+ """
131
+
132
+ def __init__(self):
133
+ pass
134
+ def decode(self, tokens):
135
+ """
136
+ Decode the given list of tokens to a final string
137
+
138
+ Args:
139
+ tokens (:obj:`List[str]`):
140
+ The list of tokens to decode
141
+
142
+ Returns:
143
+ :obj:`str`: The decoded string
144
+ """
145
+ pass
146
+
147
+ class Metaspace(Decoder):
148
+ """
149
+ Metaspace Decoder
150
+
151
+ Args:
152
+ replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
153
+ The replacement character. Must be exactly one character. By default we
154
+ use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
155
+
156
+ add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
157
+ Whether to add a space to the first word if there isn't already one. This
158
+ lets us treat `hello` exactly like `say hello`.
159
+ """
160
+
161
+ def __init__(self, replacement="▁", add_prefix_space=True):
162
+ pass
163
+ def decode(self, tokens):
164
+ """
165
+ Decode the given list of tokens to a final string
166
+
167
+ Args:
168
+ tokens (:obj:`List[str]`):
169
+ The list of tokens to decode
170
+
171
+ Returns:
172
+ :obj:`str`: The decoded string
173
+ """
174
+ pass
175
+
176
+ class Replace(Decoder):
177
+ """
178
+ Replace Decoder
179
+
180
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
181
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
182
+ """
183
+
184
+ def __init__(self, pattern, content):
185
+ pass
186
+ def decode(self, tokens):
187
+ """
188
+ Decode the given list of tokens to a final string
189
+
190
+ Args:
191
+ tokens (:obj:`List[str]`):
192
+ The list of tokens to decode
193
+
194
+ Returns:
195
+ :obj:`str`: The decoded string
196
+ """
197
+ pass
198
+
199
+ class Sequence(Decoder):
200
+ """
201
+ Sequence Decoder
202
+
203
+ Args:
204
+ decoders (:obj:`List[Decoder]`):
205
+ The decoders that need to be chained
206
+ """
207
+
208
+ def __init__(self, decoders):
209
+ pass
210
+ def decode(self, tokens):
211
+ """
212
+ Decode the given list of tokens to a final string
213
+
214
+ Args:
215
+ tokens (:obj:`List[str]`):
216
+ The list of tokens to decode
217
+
218
+ Returns:
219
+ :obj:`str`: The decoded string
220
+ """
221
+ pass
222
+
223
+ class Strip(Decoder):
224
+ """
225
+ Strip Decoder
226
+ Strips n left characters of each token, or n right characters of each token
227
+ """
228
+
229
+ def __init__(self, content, left=0, right=0):
230
+ pass
231
+ def decode(self, tokens):
232
+ """
233
+ Decode the given list of tokens to a final string
234
+
235
+ Args:
236
+ tokens (:obj:`List[str]`):
237
+ The list of tokens to decode
238
+
239
+ Returns:
240
+ :obj:`str`: The decoded string
241
+ """
242
+ pass
243
+
244
+ class WordPiece(Decoder):
245
+ """
246
+ WordPiece Decoder
247
+
248
+ Args:
249
+ prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
250
+ The prefix to use for subwords that are not a beginning-of-word
251
+
252
+ cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
253
+ Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
254
+ and some abbreviated English forms.
255
+ """
256
+
257
+ def __init__(self, prefix="##", cleanup=True):
258
+ pass
259
+ def decode(self, tokens):
260
+ """
261
+ Decode the given list of tokens to a final string
262
+
263
+ Args:
264
+ tokens (:obj:`List[str]`):
265
+ The list of tokens to decode
266
+
267
+ Returns:
268
+ :obj:`str`: The decoded string
269
+ """
270
+ pass
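Since the stub above documents a `Sequence` decoder that chains other decoders, here is a small hedged example of composing two of them directly; the token list is invented for illustration:

from tokenizers import decoders

# ByteFallback turns tokens such as <0x21> into raw bytes,
# then Fuse joins every piece into a single string.
decoder = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])

# Invented tokens; 0x21 is "!", so the expected output is "Hey friend!".
print(decoder.decode(["Hey", " friend", "<0x21>"]))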
env-llmeval/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (410 Bytes).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from .base_tokenizer import BaseTokenizer
2
+ from .bert_wordpiece import BertWordPieceTokenizer
3
+ from .byte_level_bpe import ByteLevelBPETokenizer
4
+ from .char_level_bpe import CharBPETokenizer
5
+ from .sentencepiece_bpe import SentencePieceBPETokenizer
6
+ from .sentencepiece_unigram import SentencePieceUnigramTokenizer
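These high-level implementations each wrap a fully configured pipeline around the core `Tokenizer`. A brief sketch of training one of them from in-memory data (the corpus, vocabulary size and output file name are illustrative assumptions):

from tokenizers import ByteLevelBPETokenizer

corpus = ["hello world", "hello tokenizers"]  # toy data

tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(corpus, vocab_size=500, min_frequency=1)

print(tokenizer.encode("hello world").tokens)

# The wrappers expose the same save/serialization helpers as the underlying Tokenizer.
tokenizer.save("byte-level-bpe-tokenizer.json")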
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (561 Bytes).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc ADDED
Binary file (15.5 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc ADDED
Binary file (3.87 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc ADDED
Binary file (3.4 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc ADDED
Binary file (4.26 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc ADDED
Binary file (3.22 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc ADDED
Binary file (6.42 kB).
 
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py ADDED
@@ -0,0 +1,418 @@
1
+ from typing import Dict, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
4
+ from tokenizers.decoders import Decoder
5
+ from tokenizers.models import Model
6
+ from tokenizers.normalizers import Normalizer
7
+ from tokenizers.pre_tokenizers import PreTokenizer
8
+ from tokenizers.processors import PostProcessor
9
+
10
+
11
+ Offsets = Tuple[int, int]
12
+
13
+
14
+ class BaseTokenizer:
15
+ def __init__(self, tokenizer: Tokenizer, parameters=None):
16
+ self._tokenizer = tokenizer
17
+ self._parameters = parameters if parameters is not None else {}
18
+
19
+ def __repr__(self):
20
+ return "Tokenizer(vocabulary_size={}, {})".format(
21
+ self._tokenizer.get_vocab_size(),
22
+ ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
23
+ )
24
+
25
+ def num_special_tokens_to_add(self, is_pair: bool) -> int:
26
+ """
27
+ Return the number of special tokens that would be added for single/pair sentences.
28
+ :param is_pair: Boolean indicating if the input would be a single sentence or a pair
29
+ :return: The number of special tokens that would be added
30
+ """
31
+ return self._tokenizer.num_special_tokens_to_add(is_pair)
32
+
33
+ def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
34
+ """Returns the vocabulary
35
+
36
+ Args:
37
+ with_added_tokens: boolean:
38
+ Whether to include the added tokens in the vocabulary
39
+
40
+ Returns:
41
+ The vocabulary
42
+ """
43
+ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
44
+
45
+ def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
46
+ """Returns the added reverse vocabulary
47
+
48
+ Returns:
49
+ The added vocabulary mapping ints to AddedTokens
50
+ """
51
+ return self._tokenizer.get_added_tokens_decoder()
52
+
53
+ def get_vocab_size(self, with_added_tokens: bool = True) -> int:
54
+ """Return the size of vocabulary, with or without added tokens.
55
+
56
+ Args:
57
+ with_added_tokens: (`optional`) bool:
58
+ Whether to count in added special tokens or not
59
+
60
+ Returns:
61
+ Size of vocabulary
62
+ """
63
+ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
64
+
65
+ def enable_padding(
66
+ self,
67
+ direction: Optional[str] = "right",
68
+ pad_to_multiple_of: Optional[int] = None,
69
+ pad_id: Optional[int] = 0,
70
+ pad_type_id: Optional[int] = 0,
71
+ pad_token: Optional[str] = "[PAD]",
72
+ length: Optional[int] = None,
73
+ ):
74
+ """Change the padding strategy
75
+
76
+ Args:
77
+ direction: (`optional`) str:
78
+ Can be one of: `right` or `left`
79
+
80
+ pad_to_multiple_of: (`optional`) unsigned int:
81
+ If specified, the padding length should always snap to the next multiple of
82
+ the given value. For example if we were going to pad with a length of 250 but
83
+ `pad_to_multiple_of=8` then we will pad to 256.
84
+
85
+ pad_id: (`optional`) unsigned int:
86
+ The index to be used when padding
87
+
88
+ pad_type_id: (`optional`) unsigned int:
89
+ The type index to be used when padding
90
+
91
+ pad_token: (`optional`) str:
92
+ The pad token to be used when padding
93
+
94
+ length: (`optional`) unsigned int:
95
+ If specified, the length at which to pad. If not specified
96
+ we pad using the size of the longest sequence in a batch
97
+ """
98
+ return self._tokenizer.enable_padding(
99
+ direction=direction,
100
+ pad_to_multiple_of=pad_to_multiple_of,
101
+ pad_id=pad_id,
102
+ pad_type_id=pad_type_id,
103
+ pad_token=pad_token,
104
+ length=length,
105
+ )
106
+
107
+ def no_padding(self):
108
+ """Disable padding"""
109
+ return self._tokenizer.no_padding()
110
+
111
+ @property
112
+ def padding(self) -> Optional[dict]:
113
+ """Get the current padding parameters
114
+
115
+ Returns:
116
+ None if padding is disabled, a dict with the currently set parameters
117
+ if the padding is enabled.
118
+ """
119
+ return self._tokenizer.padding
120
+
121
+ def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
122
+ """Change the truncation options
123
+
124
+ Args:
125
+ max_length: unsigned int:
126
+ The maximum length at which to truncate
127
+
128
+ stride: (`optional`) unsigned int:
129
+ The length of the previous first sequence to be included
130
+ in the overflowing sequence
131
+
132
+ strategy: (`optional`) str:
133
+ Can be one of `longest_first`, `only_first` or `only_second`
134
+ """
135
+ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
136
+
137
+ def no_truncation(self):
138
+ """Disable truncation"""
139
+ return self._tokenizer.no_truncation()
140
+
141
+ @property
142
+ def truncation(self) -> Optional[dict]:
143
+ """Get the current truncation parameters
144
+
145
+ Returns:
146
+ None if truncation is disabled, a dict with the current truncation parameters if
147
+ truncation is enabled
148
+ """
149
+ return self._tokenizer.truncation
150
+
151
+ def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
152
+ """Add the given tokens to the vocabulary
153
+
154
+ Args:
155
+ tokens: List[Union[str, AddedToken]]:
156
+ A list of tokens to add to the vocabulary. Each token can either be
157
+ a string, or an instance of AddedToken
158
+
159
+ Returns:
160
+ The number of tokens that were added to the vocabulary
161
+ """
162
+ return self._tokenizer.add_tokens(tokens)
163
+
164
+ def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
165
+ """Add the given special tokens to the vocabulary, and treat them as special tokens.
166
+
167
+ The special tokens will never be processed by the model, and will be
168
+ removed while decoding.
169
+
170
+ Args:
171
+ special_tokens: List[Union[str, AddedToken]]:
172
+ A list of special tokens to add to the vocabulary. Each token can either be
173
+ a string, or an instance of AddedToken
174
+
175
+ Returns:
176
+ The number of tokens that were added to the vocabulary
177
+ """
178
+ return self._tokenizer.add_special_tokens(special_tokens)
179
+
180
+ def normalize(self, sequence: str) -> str:
181
+ """Normalize the given sequence
182
+
183
+ Args:
184
+ sequence: str:
185
+ The sequence to normalize
186
+
187
+ Returns:
188
+ The normalized string
189
+ """
190
+ return self._tokenizer.normalize(sequence)
191
+
192
+ def encode(
193
+ self,
194
+ sequence: InputSequence,
195
+ pair: Optional[InputSequence] = None,
196
+ is_pretokenized: bool = False,
197
+ add_special_tokens: bool = True,
198
+ ) -> Encoding:
199
+ """Encode the given sequence and pair. This method can process raw text sequences as well
200
+ as already pre-tokenized sequences.
201
+
202
+ Args:
203
+ sequence: InputSequence:
204
+ The sequence we want to encode. This sequence can be either raw text or
205
+ pre-tokenized, according to the `is_pretokenized` argument:
206
+
207
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
208
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
209
+ `Union[List[str], Tuple[str]]`
210
+
211
+ is_pretokenized: bool:
212
+ Whether the input is already pre-tokenized.
213
+
214
+ add_special_tokens: bool:
215
+ Whether to add the special tokens while encoding.
216
+
217
+ Returns:
218
+ An Encoding
219
+ """
220
+ if sequence is None:
221
+ raise ValueError("encode: `sequence` can't be `None`")
222
+
223
+ return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
224
+
225
+ def encode_batch(
226
+ self,
227
+ inputs: List[EncodeInput],
228
+ is_pretokenized: bool = False,
229
+ add_special_tokens: bool = True,
230
+ ) -> List[Encoding]:
231
+ """Encode the given inputs. This method accepts raw text sequences as well as already
232
+ pre-tokenized sequences.
233
+
234
+ Args:
235
+ inputs: List[EncodeInput]:
236
+ A list of single sequences or pair sequences to encode. Each `EncodeInput` is
237
+ expected to be of the following form:
238
+ `Union[InputSequence, Tuple[InputSequence, InputSequence]]`
239
+
240
+ Each `InputSequence` can either be raw text or pre-tokenized,
241
+ according to the `is_pretokenized` argument:
242
+
243
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
244
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
245
+ `Union[List[str], Tuple[str]]`
246
+
247
+ is_pretokenized: bool:
248
+ Whether the input is already pre-tokenized.
249
+
250
+ add_special_tokens: bool:
251
+ Whether to add the special tokens while encoding.
252
+
253
+ Returns:
254
+ A list of Encoding
255
+ """
256
+
257
+ if inputs is None:
258
+ raise ValueError("encode_batch: `inputs` can't be `None`")
259
+
260
+ return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
261
+
262
+ def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
263
+ """Decode the given list of ids to a string sequence
264
+
265
+ Args:
266
+ ids: List[unsigned int]:
267
+ A list of ids to be decoded
268
+
269
+ skip_special_tokens: (`optional`) boolean:
270
+ Whether to remove all the special tokens from the output string
271
+
272
+ Returns:
273
+ The decoded string
274
+ """
275
+ if ids is None:
276
+ raise ValueError("None input is not valid. Should be a list of integers.")
277
+
278
+ return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
279
+
280
+ def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
281
+ """Decode the list of sequences to a list of string sequences
282
+
283
+ Args:
284
+ sequences: List[List[unsigned int]]:
285
+ A list of sequence of ids to be decoded
286
+
287
+ skip_special_tokens: (`optional`) boolean:
288
+ Whether to remove all the special tokens from the output strings
289
+
290
+ Returns:
291
+ A list of decoded strings
292
+ """
293
+ if sequences is None:
294
+ raise ValueError("None input is not valid. Should be list of list of integers.")
295
+
296
+ return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
297
+
298
+ def token_to_id(self, token: str) -> Optional[int]:
299
+ """Convert the given token to its corresponding id
300
+
301
+ Args:
302
+ token: str:
303
+ The token to convert
304
+
305
+ Returns:
306
+ The corresponding id if it exists, None otherwise
307
+ """
308
+ return self._tokenizer.token_to_id(token)
309
+
310
+ def id_to_token(self, id: int) -> Optional[str]:
311
+ """Convert the given token id to its corresponding string
312
+
313
+ Args:
314
+ id: int:
315
+ The token id to convert
316
+
317
+ Returns:
318
+ The corresponding string if it exists, None otherwise
319
+ """
320
+ return self._tokenizer.id_to_token(id)
321
+
322
+ def save_model(self, directory: str, prefix: Optional[str] = None):
323
+ """Save the current model to the given directory
324
+
325
+ Args:
326
+ directory: str:
327
+ A path to the destination directory
328
+
329
+ prefix: (Optional) str:
330
+ An optional prefix, used to prefix each file name
331
+ """
332
+ return self._tokenizer.model.save(directory, prefix=prefix)
333
+
334
+ def save(self, path: str, pretty: bool = True):
335
+ """Save the current Tokenizer at the given path
336
+
337
+ Args:
338
+ path: str:
339
+ A path to the destination Tokenizer file
340
+ """
341
+ return self._tokenizer.save(path, pretty)
342
+
343
+ def to_str(self, pretty: bool = False):
344
+ """Get a serialized JSON version of the Tokenizer as a str
345
+
346
+ Args:
347
+ pretty: bool:
348
+ Whether the JSON string should be prettified
349
+
350
+ Returns:
351
+ str
352
+ """
353
+ return self._tokenizer.to_str(pretty)
354
+
355
+ def post_process(
356
+ self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
357
+ ) -> Encoding:
358
+ """Apply all the post-processing steps to the given encodings.
359
+
360
+ The various steps are:
361
+ 1. Truncate according to global params (provided to `enable_truncation`)
362
+ 2. Apply the PostProcessor
363
+ 3. Pad according to global params (provided to `enable_padding`)
364
+
365
+ Args:
366
+ encoding: Encoding:
367
+ The main Encoding to post process
368
+
369
+ pair: Optional[Encoding]:
370
+ An optional pair Encoding
371
+
372
+ add_special_tokens: bool:
373
+ Whether to add special tokens
374
+
375
+ Returns:
376
+ The resulting Encoding
377
+ """
378
+ return self._tokenizer.post_process(encoding, pair, add_special_tokens)
379
+
380
+ @property
381
+ def model(self) -> Model:
382
+ return self._tokenizer.model
383
+
384
+ @model.setter
385
+ def model(self, model: Model):
386
+ self._tokenizer.model = model
387
+
388
+ @property
389
+ def normalizer(self) -> Normalizer:
390
+ return self._tokenizer.normalizer
391
+
392
+ @normalizer.setter
393
+ def normalizer(self, normalizer: Normalizer):
394
+ self._tokenizer.normalizer = normalizer
395
+
396
+ @property
397
+ def pre_tokenizer(self) -> PreTokenizer:
398
+ return self._tokenizer.pre_tokenizer
399
+
400
+ @pre_tokenizer.setter
401
+ def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
402
+ self._tokenizer.pre_tokenizer = pre_tokenizer
403
+
404
+ @property
405
+ def post_processor(self) -> PostProcessor:
406
+ return self._tokenizer.post_processor
407
+
408
+ @post_processor.setter
409
+ def post_processor(self, post_processor: PostProcessor):
410
+ self._tokenizer.post_processor = post_processor
411
+
412
+ @property
413
+ def decoder(self) -> Decoder:
414
+ return self._tokenizer.decoder
415
+
416
+ @decoder.setter
417
+ def decoder(self, decoder: Decoder):
418
+ self._tokenizer.decoder = decoder
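
Every method on BaseTokenizer above simply forwards to the wrapped Tokenizer. A minimal sketch of the padding/truncation/encode/decode flow, assuming a tiny in-memory WordLevel vocabulary built just for illustration:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.implementations import BaseTokenizer

vocab = {"[UNK]": 0, "[PAD]": 1, "hello": 2, "world": 3}   # toy vocabulary for the sketch
tok = BaseTokenizer(Tokenizer(WordLevel(vocab, unk_token="[UNK]")))
tok.pre_tokenizer = Whitespace()                           # split on whitespace before the model runs
tok.enable_padding(pad_id=1, pad_token="[PAD]", length=4)  # pad every encoding out to 4 ids
tok.enable_truncation(max_length=4)                        # and truncate anything longer

enc = tok.encode("hello world", add_special_tokens=False)
print(enc.ids, enc.tokens)                                 # padded Encoding
print(tok.decode(enc.ids, skip_special_tokens=False))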
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py ADDED
@@ -0,0 +1,151 @@
1
+ from typing import Dict, Iterator, List, Optional, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, trainers
4
+ from tokenizers.models import WordPiece
5
+ from tokenizers.normalizers import BertNormalizer
6
+ from tokenizers.pre_tokenizers import BertPreTokenizer
7
+ from tokenizers.processors import BertProcessing
8
+
9
+ from .base_tokenizer import BaseTokenizer
10
+
11
+
12
+ class BertWordPieceTokenizer(BaseTokenizer):
13
+ """Bert WordPiece Tokenizer"""
14
+
15
+ def __init__(
16
+ self,
17
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
18
+ unk_token: Union[str, AddedToken] = "[UNK]",
19
+ sep_token: Union[str, AddedToken] = "[SEP]",
20
+ cls_token: Union[str, AddedToken] = "[CLS]",
21
+ pad_token: Union[str, AddedToken] = "[PAD]",
22
+ mask_token: Union[str, AddedToken] = "[MASK]",
23
+ clean_text: bool = True,
24
+ handle_chinese_chars: bool = True,
25
+ strip_accents: Optional[bool] = None,
26
+ lowercase: bool = True,
27
+ wordpieces_prefix: str = "##",
28
+ ):
29
+ if vocab is not None:
30
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
31
+ else:
32
+ tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
33
+
34
+ # Let the tokenizer know about special tokens if they are part of the vocab
35
+ if tokenizer.token_to_id(str(unk_token)) is not None:
36
+ tokenizer.add_special_tokens([str(unk_token)])
37
+ if tokenizer.token_to_id(str(sep_token)) is not None:
38
+ tokenizer.add_special_tokens([str(sep_token)])
39
+ if tokenizer.token_to_id(str(cls_token)) is not None:
40
+ tokenizer.add_special_tokens([str(cls_token)])
41
+ if tokenizer.token_to_id(str(pad_token)) is not None:
42
+ tokenizer.add_special_tokens([str(pad_token)])
43
+ if tokenizer.token_to_id(str(mask_token)) is not None:
44
+ tokenizer.add_special_tokens([str(mask_token)])
45
+
46
+ tokenizer.normalizer = BertNormalizer(
47
+ clean_text=clean_text,
48
+ handle_chinese_chars=handle_chinese_chars,
49
+ strip_accents=strip_accents,
50
+ lowercase=lowercase,
51
+ )
52
+ tokenizer.pre_tokenizer = BertPreTokenizer()
53
+
54
+ if vocab is not None:
55
+ sep_token_id = tokenizer.token_to_id(str(sep_token))
56
+ if sep_token_id is None:
57
+ raise TypeError("sep_token not found in the vocabulary")
58
+ cls_token_id = tokenizer.token_to_id(str(cls_token))
59
+ if cls_token_id is None:
60
+ raise TypeError("cls_token not found in the vocabulary")
61
+
62
+ tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
63
+ tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
64
+
65
+ parameters = {
66
+ "model": "BertWordPiece",
67
+ "unk_token": unk_token,
68
+ "sep_token": sep_token,
69
+ "cls_token": cls_token,
70
+ "pad_token": pad_token,
71
+ "mask_token": mask_token,
72
+ "clean_text": clean_text,
73
+ "handle_chinese_chars": handle_chinese_chars,
74
+ "strip_accents": strip_accents,
75
+ "lowercase": lowercase,
76
+ "wordpieces_prefix": wordpieces_prefix,
77
+ }
78
+
79
+ super().__init__(tokenizer, parameters)
80
+
81
+ @staticmethod
82
+ def from_file(vocab: str, **kwargs):
83
+ vocab = WordPiece.read_file(vocab)
84
+ return BertWordPieceTokenizer(vocab, **kwargs)
85
+
86
+ def train(
87
+ self,
88
+ files: Union[str, List[str]],
89
+ vocab_size: int = 30000,
90
+ min_frequency: int = 2,
91
+ limit_alphabet: int = 1000,
92
+ initial_alphabet: List[str] = [],
93
+ special_tokens: List[Union[str, AddedToken]] = [
94
+ "[PAD]",
95
+ "[UNK]",
96
+ "[CLS]",
97
+ "[SEP]",
98
+ "[MASK]",
99
+ ],
100
+ show_progress: bool = True,
101
+ wordpieces_prefix: str = "##",
102
+ ):
103
+ """Train the model using the given files"""
104
+
105
+ trainer = trainers.WordPieceTrainer(
106
+ vocab_size=vocab_size,
107
+ min_frequency=min_frequency,
108
+ limit_alphabet=limit_alphabet,
109
+ initial_alphabet=initial_alphabet,
110
+ special_tokens=special_tokens,
111
+ show_progress=show_progress,
112
+ continuing_subword_prefix=wordpieces_prefix,
113
+ )
114
+ if isinstance(files, str):
115
+ files = [files]
116
+ self._tokenizer.train(files, trainer=trainer)
117
+
118
+ def train_from_iterator(
119
+ self,
120
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
121
+ vocab_size: int = 30000,
122
+ min_frequency: int = 2,
123
+ limit_alphabet: int = 1000,
124
+ initial_alphabet: List[str] = [],
125
+ special_tokens: List[Union[str, AddedToken]] = [
126
+ "[PAD]",
127
+ "[UNK]",
128
+ "[CLS]",
129
+ "[SEP]",
130
+ "[MASK]",
131
+ ],
132
+ show_progress: bool = True,
133
+ wordpieces_prefix: str = "##",
134
+ length: Optional[int] = None,
135
+ ):
136
+ """Train the model using the given iterator"""
137
+
138
+ trainer = trainers.WordPieceTrainer(
139
+ vocab_size=vocab_size,
140
+ min_frequency=min_frequency,
141
+ limit_alphabet=limit_alphabet,
142
+ initial_alphabet=initial_alphabet,
143
+ special_tokens=special_tokens,
144
+ show_progress=show_progress,
145
+ continuing_subword_prefix=wordpieces_prefix,
146
+ )
147
+ self._tokenizer.train_from_iterator(
148
+ iterator,
149
+ trainer=trainer,
150
+ length=length,
151
+ )
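
As a rough usage sketch (the training file path is hypothetical), training and then using the BertWordPieceTokenizer defined above looks like this:

from tokenizers.implementations import BertWordPieceTokenizer

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.train(files=["corpus.txt"], vocab_size=30000, min_frequency=2)   # "corpus.txt" is a placeholder

encoding = tokenizer.encode("Tokenize me, please.")
print(encoding.tokens)                 # lowercased WordPiece subwords ("##"-prefixed continuations)
tokenizer.save("bert-wordpiece.json")  # full tokenizer, reloadable with Tokenizer.from_file

Note that the BertProcessing post-processor is only attached in the `vocab is not None` branch above, so [CLS]/[SEP] are not added automatically when the tokenizer is constructed without a vocab and trained afterwards.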
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py ADDED
@@ -0,0 +1,122 @@
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
4
+ from tokenizers.models import BPE
5
+ from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
6
+
7
+ from .base_tokenizer import BaseTokenizer
8
+
9
+
10
+ class ByteLevelBPETokenizer(BaseTokenizer):
11
+ """ByteLevelBPETokenizer
12
+
13
+ Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
19
+ merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
20
+ add_prefix_space: bool = False,
21
+ lowercase: bool = False,
22
+ dropout: Optional[float] = None,
23
+ unicode_normalizer: Optional[str] = None,
24
+ continuing_subword_prefix: Optional[str] = None,
25
+ end_of_word_suffix: Optional[str] = None,
26
+ trim_offsets: bool = False,
27
+ ):
28
+ if vocab is not None and merges is not None:
29
+ tokenizer = Tokenizer(
30
+ BPE(
31
+ vocab,
32
+ merges,
33
+ dropout=dropout,
34
+ continuing_subword_prefix=continuing_subword_prefix or "",
35
+ end_of_word_suffix=end_of_word_suffix or "",
36
+ )
37
+ )
38
+ else:
39
+ tokenizer = Tokenizer(BPE())
40
+
41
+ # Check for Unicode normalization first (before everything else)
42
+ normalizers = []
43
+
44
+ if unicode_normalizer:
45
+ normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
46
+
47
+ if lowercase:
48
+ normalizers += [Lowercase()]
49
+
50
+ # Create the normalizer structure
51
+ if len(normalizers) > 0:
52
+ if len(normalizers) > 1:
53
+ tokenizer.normalizer = Sequence(normalizers)
54
+ else:
55
+ tokenizer.normalizer = normalizers[0]
56
+
57
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
58
+ tokenizer.decoder = decoders.ByteLevel()
59
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
60
+
61
+ parameters = {
62
+ "model": "ByteLevelBPE",
63
+ "add_prefix_space": add_prefix_space,
64
+ "lowercase": lowercase,
65
+ "dropout": dropout,
66
+ "unicode_normalizer": unicode_normalizer,
67
+ "continuing_subword_prefix": continuing_subword_prefix,
68
+ "end_of_word_suffix": end_of_word_suffix,
69
+ "trim_offsets": trim_offsets,
70
+ }
71
+
72
+ super().__init__(tokenizer, parameters)
73
+
74
+ @staticmethod
75
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
76
+ vocab, merges = BPE.read_file(vocab_filename, merges_filename)
77
+ return ByteLevelBPETokenizer(vocab, merges, **kwargs)
78
+
79
+ def train(
80
+ self,
81
+ files: Union[str, List[str]],
82
+ vocab_size: int = 30000,
83
+ min_frequency: int = 2,
84
+ show_progress: bool = True,
85
+ special_tokens: List[Union[str, AddedToken]] = [],
86
+ ):
87
+ """Train the model using the given files"""
88
+
89
+ trainer = trainers.BpeTrainer(
90
+ vocab_size=vocab_size,
91
+ min_frequency=min_frequency,
92
+ show_progress=show_progress,
93
+ special_tokens=special_tokens,
94
+ initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
95
+ )
96
+ if isinstance(files, str):
97
+ files = [files]
98
+ self._tokenizer.train(files, trainer=trainer)
99
+
100
+ def train_from_iterator(
101
+ self,
102
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
103
+ vocab_size: int = 30000,
104
+ min_frequency: int = 2,
105
+ show_progress: bool = True,
106
+ special_tokens: List[Union[str, AddedToken]] = [],
107
+ length: Optional[int] = None,
108
+ ):
109
+ """Train the model using the given iterator"""
110
+
111
+ trainer = trainers.BpeTrainer(
112
+ vocab_size=vocab_size,
113
+ min_frequency=min_frequency,
114
+ show_progress=show_progress,
115
+ special_tokens=special_tokens,
116
+ initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
117
+ )
118
+ self._tokenizer.train_from_iterator(
119
+ iterator,
120
+ trainer=trainer,
121
+ length=length,
122
+ )
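
A minimal training sketch for the byte-level tokenizer above; the corpus path is a placeholder:

from tokenizers.implementations import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train(files=["corpus.txt"], vocab_size=5000, special_tokens=["<s>", "</s>"])   # hypothetical file

encoding = tokenizer.encode("Byte-level BPE can encode any input without an <unk> token.")
print(encoding.tokens)
tokenizer.save_model(".")   # the underlying BPE model writes vocab.json and merges.txt here

Because the pre-tokenizer works on bytes, the trainer's initial alphabet is the fixed 256-entry byte alphabet (`pre_tokenizers.ByteLevel.alphabet()` above), so no character can ever fall out of vocabulary.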
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py ADDED
@@ -0,0 +1,150 @@
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
4
+ from ..models import BPE
5
+ from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
6
+ from .base_tokenizer import BaseTokenizer
7
+
8
+
9
+ class CharBPETokenizer(BaseTokenizer):
10
+ """Original BPE Tokenizer
11
+
12
+ Represents the BPE algorithm, as introduced by Rico Sennrich
13
+ (https://arxiv.org/abs/1508.07909)
14
+
15
+ The default settings correspond to the OpenAI GPT BPE tokenizer and differ from the original
16
+ Sennrich subword-nmt implementation by the following options that you can deactivate:
17
+ - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
18
+ * removing any control characters and replacing all whitespace with the classic space character.
19
+ * handling Chinese characters by putting spaces around them.
20
+ * stripping all accents.
21
+ - splitting on punctuation in addition to whitespace (deactivate it with
22
+ `split_on_whitespace_only=True`)
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
28
+ merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
29
+ unk_token: Union[str, AddedToken] = "<unk>",
30
+ suffix: str = "</w>",
31
+ dropout: Optional[float] = None,
32
+ lowercase: bool = False,
33
+ unicode_normalizer: Optional[str] = None,
34
+ bert_normalizer: bool = True,
35
+ split_on_whitespace_only: bool = False,
36
+ ):
37
+ if vocab is not None and merges is not None:
38
+ tokenizer = Tokenizer(
39
+ BPE(
40
+ vocab,
41
+ merges,
42
+ dropout=dropout,
43
+ unk_token=str(unk_token),
44
+ end_of_word_suffix=suffix,
45
+ )
46
+ )
47
+ else:
48
+ tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
49
+
50
+ if tokenizer.token_to_id(str(unk_token)) is not None:
51
+ tokenizer.add_special_tokens([str(unk_token)])
52
+
53
+ # Check for Unicode normalization first (before everything else)
54
+ normalizers = []
55
+
56
+ if unicode_normalizer:
57
+ normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
58
+
59
+ if bert_normalizer:
60
+ normalizers += [BertNormalizer(lowercase=False)]
61
+
62
+ if lowercase:
63
+ normalizers += [Lowercase()]
64
+
65
+ # Create the normalizer structure
66
+ if len(normalizers) > 0:
67
+ if len(normalizers) > 1:
68
+ tokenizer.normalizer = Sequence(normalizers)
69
+ else:
70
+ tokenizer.normalizer = normalizers[0]
71
+
72
+ if split_on_whitespace_only:
73
+ tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
74
+ else:
75
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
76
+
77
+ tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
78
+
79
+ parameters = {
80
+ "model": "BPE",
81
+ "unk_token": unk_token,
82
+ "suffix": suffix,
83
+ "dropout": dropout,
84
+ "lowercase": lowercase,
85
+ "unicode_normalizer": unicode_normalizer,
86
+ "bert_normalizer": bert_normalizer,
87
+ "split_on_whitespace_only": split_on_whitespace_only,
88
+ }
89
+
90
+ super().__init__(tokenizer, parameters)
91
+
92
+ @staticmethod
93
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
94
+ vocab, merges = BPE.read_file(vocab_filename, merges_filename)
95
+ return CharBPETokenizer(vocab, merges, **kwargs)
96
+
97
+ def train(
98
+ self,
99
+ files: Union[str, List[str]],
100
+ vocab_size: int = 30000,
101
+ min_frequency: int = 2,
102
+ special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
103
+ limit_alphabet: int = 1000,
104
+ initial_alphabet: List[str] = [],
105
+ suffix: Optional[str] = "</w>",
106
+ show_progress: bool = True,
107
+ ):
108
+ """Train the model using the given files"""
109
+
110
+ trainer = trainers.BpeTrainer(
111
+ vocab_size=vocab_size,
112
+ min_frequency=min_frequency,
113
+ special_tokens=special_tokens,
114
+ limit_alphabet=limit_alphabet,
115
+ initial_alphabet=initial_alphabet,
116
+ end_of_word_suffix=suffix,
117
+ show_progress=show_progress,
118
+ )
119
+ if isinstance(files, str):
120
+ files = [files]
121
+ self._tokenizer.train(files, trainer=trainer)
122
+
123
+ def train_from_iterator(
124
+ self,
125
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
126
+ vocab_size: int = 30000,
127
+ min_frequency: int = 2,
128
+ special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
129
+ limit_alphabet: int = 1000,
130
+ initial_alphabet: List[str] = [],
131
+ suffix: Optional[str] = "</w>",
132
+ show_progress: bool = True,
133
+ length: Optional[int] = None,
134
+ ):
135
+ """Train the model using the given iterator"""
136
+
137
+ trainer = trainers.BpeTrainer(
138
+ vocab_size=vocab_size,
139
+ min_frequency=min_frequency,
140
+ special_tokens=special_tokens,
141
+ limit_alphabet=limit_alphabet,
142
+ initial_alphabet=initial_alphabet,
143
+ end_of_word_suffix=suffix,
144
+ show_progress=show_progress,
145
+ )
146
+ self._tokenizer.train_from_iterator(
147
+ iterator,
148
+ trainer=trainer,
149
+ length=length,
150
+ )
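
A short sketch of the character-level BPE above, again with a placeholder corpus path:

from tokenizers.implementations import CharBPETokenizer

tokenizer = CharBPETokenizer(lowercase=True)
tokenizer.train(files=["corpus.txt"], vocab_size=10000, special_tokens=["<unk>"])   # hypothetical file

encoding = tokenizer.encode("Merges are learned over characters, with </w> marking word endings.")
print(encoding.tokens)
print(tokenizer.decode(encoding.ids))   # the BPEDecoder configured above strips the </w> suffixes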
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_bpe.py ADDED
@@ -0,0 +1,102 @@
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
4
+ from tokenizers.models import BPE
5
+ from tokenizers.normalizers import NFKC
6
+
7
+ from .base_tokenizer import BaseTokenizer
8
+
9
+
10
+ class SentencePieceBPETokenizer(BaseTokenizer):
11
+ """SentencePiece BPE Tokenizer
12
+
13
+ Represents the BPE algorithm, with the pretokenization used by SentencePiece
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
19
+ merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
20
+ unk_token: Union[str, AddedToken] = "<unk>",
21
+ replacement: str = "▁",
22
+ add_prefix_space: bool = True,
23
+ dropout: Optional[float] = None,
24
+ fuse_unk: Optional[bool] = False,
25
+ ):
26
+ if vocab is not None and merges is not None:
27
+ tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
28
+ else:
29
+ tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
30
+
31
+ if tokenizer.token_to_id(str(unk_token)) is not None:
32
+ tokenizer.add_special_tokens([str(unk_token)])
33
+
34
+ tokenizer.normalizer = NFKC()
35
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
36
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
37
+
38
+ parameters = {
39
+ "model": "SentencePieceBPE",
40
+ "unk_token": unk_token,
41
+ "replacement": replacement,
42
+ "add_prefix_space": add_prefix_space,
43
+ "dropout": dropout,
44
+ }
45
+
46
+ super().__init__(tokenizer, parameters)
47
+
48
+ @staticmethod
49
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
50
+ vocab, merges = BPE.read_file(vocab_filename, merges_filename)
51
+ return SentencePieceBPETokenizer(vocab, merges, **kwargs)
52
+
53
+ def train(
54
+ self,
55
+ files: Union[str, List[str]],
56
+ vocab_size: int = 30000,
57
+ min_frequency: int = 2,
58
+ special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
59
+ limit_alphabet: int = 1000,
60
+ initial_alphabet: List[str] = [],
61
+ show_progress: bool = True,
62
+ ):
63
+ """Train the model using the given files"""
64
+
65
+ trainer = trainers.BpeTrainer(
66
+ vocab_size=vocab_size,
67
+ min_frequency=min_frequency,
68
+ special_tokens=special_tokens,
69
+ limit_alphabet=limit_alphabet,
70
+ initial_alphabet=initial_alphabet,
71
+ show_progress=show_progress,
72
+ )
73
+ if isinstance(files, str):
74
+ files = [files]
75
+ self._tokenizer.train(files, trainer=trainer)
76
+
77
+ def train_from_iterator(
78
+ self,
79
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
80
+ vocab_size: int = 30000,
81
+ min_frequency: int = 2,
82
+ special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
83
+ limit_alphabet: int = 1000,
84
+ initial_alphabet: List[str] = [],
85
+ show_progress: bool = True,
86
+ length: Optional[int] = None,
87
+ ):
88
+ """Train the model using the given iterator"""
89
+
90
+ trainer = trainers.BpeTrainer(
91
+ vocab_size=vocab_size,
92
+ min_frequency=min_frequency,
93
+ special_tokens=special_tokens,
94
+ limit_alphabet=limit_alphabet,
95
+ initial_alphabet=initial_alphabet,
96
+ show_progress=show_progress,
97
+ )
98
+ self._tokenizer.train_from_iterator(
99
+ iterator,
100
+ trainer=trainer,
101
+ length=length,
102
+ )
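
The SentencePiece-style BPE above differs from the character-level one mainly in its Metaspace pre-tokenizer, which replaces spaces with "▁" so that decoding is lossless. A rough sketch (placeholder corpus path):

from tokenizers.implementations import SentencePieceBPETokenizer

tokenizer = SentencePieceBPETokenizer()
tokenizer.train(files=["corpus.txt"], vocab_size=8000, special_tokens=["<unk>"])   # hypothetical file

encoding = tokenizer.encode("no whitespace information is lost")
print(encoding.tokens)                   # pieces start with "▁" wherever a word began
print(tokenizer.decode(encoding.ids))    # the Metaspace decoder restores the original spacing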
env-llmeval/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py ADDED
@@ -0,0 +1,194 @@
1
+ import json
2
+ import os
3
+ from typing import Iterator, List, Optional, Union, Tuple
4
+
5
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
6
+ from tokenizers.models import Unigram
7
+
8
+ from .base_tokenizer import BaseTokenizer
9
+
10
+
11
+ class SentencePieceUnigramTokenizer(BaseTokenizer):
12
+ """SentencePiece Unigram Tokenizer
13
+
14
+ Represents the Unigram algorithm, with the pretokenization used by SentencePiece
15
+ """
16
+
17
+ def __init__(
18
+ self,
19
+ vocab: Optional[List[Tuple[str, float]]] = None,
20
+ replacement: str = "▁",
21
+ add_prefix_space: bool = True,
22
+ ):
23
+ if vocab is not None:
24
+ # Let Unigram(..) fail if only one of them is None
25
+ tokenizer = Tokenizer(Unigram(vocab))
26
+ else:
27
+ tokenizer = Tokenizer(Unigram())
28
+
29
+ tokenizer.normalizer = normalizers.Sequence(
30
+ [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
31
+ )
32
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
33
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
34
+
35
+ parameters = {
36
+ "model": "SentencePieceUnigram",
37
+ "replacement": replacement,
38
+ "add_prefix_space": add_prefix_space,
39
+ }
40
+
41
+ super().__init__(tokenizer, parameters)
42
+
43
+ def train(
44
+ self,
45
+ files: Union[str, List[str]],
46
+ vocab_size: int = 8000,
47
+ show_progress: bool = True,
48
+ special_tokens: Optional[List[Union[str, AddedToken]]] = None,
49
+ initial_alphabet: Optional[List[str]] = None,
50
+ unk_token: Optional[str] = None,
51
+ ):
52
+ """
53
+ Train the model using the given files
54
+
55
+ Args:
56
+ files (:obj:`List[str]`):
57
+ A list of paths to the files that we should use for training
58
+ vocab_size (:obj:`int`):
59
+ The size of the final vocabulary, including all tokens and alphabet.
60
+ show_progress (:obj:`bool`):
61
+ Whether to show progress bars while training.
62
+ special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
63
+ A list of special tokens the model should know of.
64
+ initial_alphabet (:obj:`List[str]`, `optional`):
65
+ A list of characters to include in the initial alphabet, even
66
+ if not seen in the training dataset.
67
+ If the strings contain more than one character, only the first one
68
+ is kept.
69
+ unk_token (:obj:`str`, `optional`):
70
+ The unknown token to be used by the model.
71
+ """
72
+
73
+ if special_tokens is None:
74
+ special_tokens = []
75
+
76
+ if initial_alphabet is None:
77
+ initial_alphabet = []
78
+
79
+ trainer = trainers.UnigramTrainer(
80
+ vocab_size=vocab_size,
81
+ special_tokens=special_tokens,
82
+ show_progress=show_progress,
83
+ initial_alphabet=initial_alphabet,
84
+ unk_token=unk_token,
85
+ )
86
+
87
+ if isinstance(files, str):
88
+ files = [files]
89
+ self._tokenizer.train(files, trainer=trainer)
90
+
91
+ def train_from_iterator(
92
+ self,
93
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
94
+ vocab_size: int = 8000,
95
+ show_progress: bool = True,
96
+ special_tokens: Optional[List[Union[str, AddedToken]]] = None,
97
+ initial_alphabet: Optional[List[str]] = None,
98
+ unk_token: Optional[str] = None,
99
+ length: Optional[int] = None,
100
+ ):
101
+ """
102
+ Train the model using the given iterator
103
+
104
+ Args:
105
+ iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
106
+ Any iterator over strings or list of strings
107
+ vocab_size (:obj:`int`):
108
+ The size of the final vocabulary, including all tokens and alphabet.
109
+ show_progress (:obj:`bool`):
110
+ Whether to show progress bars while training.
111
+ special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
112
+ A list of special tokens the model should know of.
113
+ initial_alphabet (:obj:`List[str]`, `optional`):
114
+ A list of characters to include in the initial alphabet, even
115
+ if not seen in the training dataset.
116
+ If the strings contain more than one character, only the first one
117
+ is kept.
118
+ unk_token (:obj:`str`, `optional`):
119
+ The unknown token to be used by the model.
120
+ length (:obj:`int`, `optional`):
121
+ The total number of sequences in the iterator. This is used to
122
+ provide meaningful progress tracking
123
+ """
124
+
125
+ if special_tokens is None:
126
+ special_tokens = []
127
+
128
+ if initial_alphabet is None:
129
+ initial_alphabet = []
130
+
131
+ trainer = trainers.UnigramTrainer(
132
+ vocab_size=vocab_size,
133
+ special_tokens=special_tokens,
134
+ show_progress=show_progress,
135
+ initial_alphabet=initial_alphabet,
136
+ unk_token=unk_token,
137
+ )
138
+
139
+ self._tokenizer.train_from_iterator(
140
+ iterator,
141
+ trainer=trainer,
142
+ length=length,
143
+ )
144
+
145
+ @staticmethod
146
+ def from_spm(filename: str):
147
+ try:
148
+ import sys
149
+
150
+ sys.path.append(".")
151
+
152
+ import sentencepiece_model_pb2 as model
153
+ except Exception:
154
+ raise Exception(
155
+ "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
156
+ )
157
+
158
+ m = model.ModelProto()
159
+ m.ParseFromString(open(filename, "rb").read())
160
+
161
+ precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
162
+ vocab = [(piece.piece, piece.score) for piece in m.pieces]
163
+ unk_id = m.trainer_spec.unk_id
164
+ model_type = m.trainer_spec.model_type
165
+ byte_fallback = m.trainer_spec.byte_fallback
166
+ if model_type != 1:
167
+ raise Exception(
168
+ "You're trying to run a `Unigram` model but your file was trained with a different algorithm"
169
+ )
170
+
171
+ replacement = "▁"
172
+ add_prefix_space = True
173
+
174
+ tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
175
+
176
+ if precompiled_charsmap:
177
+ tokenizer.normalizer = normalizers.Sequence(
178
+ [
179
+ normalizers.Precompiled(precompiled_charsmap),
180
+ normalizers.Replace(Regex(" {2,}"), " "),
181
+ ]
182
+ )
183
+ else:
184
+ tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
185
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
186
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
187
+
188
+ parameters = {
189
+ "model": "SentencePieceUnigram",
190
+ }
191
+
192
+ obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
193
+ BaseTokenizer.__init__(obj, tokenizer, parameters)
194
+ return obj
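
Besides `from_spm`, which imports an existing SentencePiece model file, the Unigram wrapper can be trained directly from any iterator of strings. A toy sketch with deliberately tiny, illustrative values:

from tokenizers.implementations import SentencePieceUnigramTokenizer

corpus = ["a tiny in-memory corpus", "just enough text to illustrate iterator-based training"]

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
    corpus,
    vocab_size=60,              # kept small to suit the toy corpus
    special_tokens=["<unk>"],
    unk_token="<unk>",
    length=len(corpus),         # lets the trainer report meaningful progress
)
print(tokenizer.encode("iterator-based training").tokens)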
env-llmeval/lib/python3.10/site-packages/tokenizers/models/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Generated content DO NOT EDIT
2
+ from .. import models
3
+
4
+ Model = models.Model
5
+ BPE = models.BPE
6
+ Unigram = models.Unigram
7
+ WordLevel = models.WordLevel
8
+ WordPiece = models.WordPiece
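
These classes are the interchangeable `Model` implementations that a `Tokenizer` wraps; swapping one for another changes only the tokenization algorithm, not the surrounding pipeline. A minimal sketch with a toy in-memory vocabulary:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel, BPE

vocab = {"<unk>": 0, "hello": 1, "world": 2}          # toy vocabulary for the sketch
tokenizer = Tokenizer(WordLevel(vocab, unk_token="<unk>"))
print(tokenizer.model.token_to_id("hello"))           # 1

tokenizer.model = BPE(unk_token="<unk>")              # swap in a (still untrained) BPE model in place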
env-llmeval/lib/python3.10/site-packages/tokenizers/models/__init__.pyi ADDED
@@ -0,0 +1,562 @@
1
+ # Generated content DO NOT EDIT
2
+ class Model:
3
+ """
4
+ Base class for all models
5
+
6
+ The model represents the actual tokenization algorithm. This is the part that
7
+ will contain and manage the learned vocabulary.
8
+
9
+ This class cannot be constructed directly. Please use one of the concrete models.
10
+ """
11
+
12
+ def get_trainer(self):
13
+ """
14
+ Get the associated :class:`~tokenizers.trainers.Trainer`
15
+
16
+ Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
17
+ :class:`~tokenizers.models.Model`.
18
+
19
+ Returns:
20
+ :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
21
+ """
22
+ pass
23
+ def id_to_token(self, id):
24
+ """
25
+ Get the token associated to an ID
26
+
27
+ Args:
28
+ id (:obj:`int`):
29
+ An ID to convert to a token
30
+
31
+ Returns:
32
+ :obj:`str`: The token associated to the ID
33
+ """
34
+ pass
35
+ def save(self, folder, prefix):
36
+ """
37
+ Save the current model
38
+
39
+ Save the current model in the given folder, using the given prefix for the various
40
+ files that will get created.
41
+ Any file with the same name that already exists in this folder will be overwritten.
42
+
43
+ Args:
44
+ folder (:obj:`str`):
45
+ The path to the target folder in which to save the various files
46
+
47
+ prefix (:obj:`str`, `optional`):
48
+ An optional prefix, used to prefix each file name
49
+
50
+ Returns:
51
+ :obj:`List[str]`: The list of saved files
52
+ """
53
+ pass
54
+ def token_to_id(self, tokens):
55
+ """
56
+ Get the ID associated to a token
57
+
58
+ Args:
59
+ token (:obj:`str`):
60
+ A token to convert to an ID
61
+
62
+ Returns:
63
+ :obj:`int`: The ID associated to the token
64
+ """
65
+ pass
66
+ def tokenize(self, sequence):
67
+ """
68
+ Tokenize a sequence
69
+
70
+ Args:
71
+ sequence (:obj:`str`):
72
+ A sequence to tokenize
73
+
74
+ Returns:
75
+ A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
76
+ """
77
+ pass
78
+
79
+ class BPE(Model):
80
+ """
81
+ An implementation of the BPE (Byte-Pair Encoding) algorithm
82
+
83
+ Args:
84
+ vocab (:obj:`Dict[str, int]`, `optional`):
85
+ A dictionary of string keys and their ids :obj:`{"am": 0,...}`
86
+
87
+ merges (:obj:`List[Tuple[str, str]]`, `optional`):
88
+ A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
89
+
90
+ cache_capacity (:obj:`int`, `optional`):
91
+ The number of words that the BPE cache can contain. The cache speeds
92
+ up the process by keeping the result of the merge operations
93
+ for a number of words.
94
+
95
+ dropout (:obj:`float`, `optional`):
96
+ A float between 0 and 1 that represents the BPE dropout to use.
97
+
98
+ unk_token (:obj:`str`, `optional`):
99
+ The unknown token to be used by the model.
100
+
101
+ continuing_subword_prefix (:obj:`str`, `optional`):
102
+ The prefix to attach to subword units that don't represent a beginning of word.
103
+
104
+ end_of_word_suffix (:obj:`str`, `optional`):
105
+ The suffix to attach to subword units that represent an end of word.
106
+
107
+ fuse_unk (:obj:`bool`, `optional`):
108
+ Whether to fuse any subsequent unknown tokens into a single one
109
+
110
+ byte_fallback (:obj:`bool`, `optional`):
111
+ Whether to use spm byte-fallback trick (defaults to False)
112
+ """
113
+
114
+ def __init__(
115
+ self,
116
+ vocab=None,
117
+ merges=None,
118
+ cache_capacity=None,
119
+ dropout=None,
120
+ unk_token=None,
121
+ continuing_subword_prefix=None,
122
+ end_of_word_suffix=None,
123
+ fuse_unk=None,
124
+ byte_fallback=False,
125
+ ):
126
+ pass
127
+ @staticmethod
128
+ def from_file(vocab, merges, **kwargs):
129
+ """
130
+ Instantiate a BPE model from the given files.
131
+
132
+ This method is roughly equivalent to doing::
133
+
134
+ vocab, merges = BPE.read_file(vocab_filename, merges_filename)
135
+ bpe = BPE(vocab, merges)
136
+
137
+ If you don't need to keep the :obj:`vocab, merges` values lying around,
138
+ this method is more optimized than manually calling
139
+ :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
140
+
141
+ Args:
142
+ vocab (:obj:`str`):
143
+ The path to a :obj:`vocab.json` file
144
+
145
+ merges (:obj:`str`):
146
+ The path to a :obj:`merges.txt` file
147
+
148
+ Returns:
149
+ :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
150
+ """
151
+ pass
152
+ def get_trainer(self):
153
+ """
154
+ Get the associated :class:`~tokenizers.trainers.Trainer`
155
+
156
+ Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
157
+ :class:`~tokenizers.models.Model`.
158
+
159
+ Returns:
160
+ :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
161
+ """
162
+ pass
163
+ def id_to_token(self, id):
164
+ """
165
+ Get the token associated to an ID
166
+
167
+ Args:
168
+ id (:obj:`int`):
169
+ An ID to convert to a token
170
+
171
+ Returns:
172
+ :obj:`str`: The token associated to the ID
173
+ """
174
+ pass
175
+ @staticmethod
176
+ def read_file(vocab, merges):
177
+ """
178
+ Read a :obj:`vocab.json` and a :obj:`merges.txt` files
179
+
180
+ This method provides a way to read and parse the content of these files,
181
+ returning the relevant data structures. If you want to instantiate some BPE models
182
+ from memory, this method gives you the expected input from the standard files.
183
+
184
+ Args:
185
+ vocab (:obj:`str`):
186
+ The path to a :obj:`vocab.json` file
187
+
188
+ merges (:obj:`str`):
189
+ The path to a :obj:`merges.txt` file
190
+
191
+ Returns:
192
+ A :obj:`Tuple` with the vocab and the merges:
193
+ The vocabulary and merges loaded into memory
194
+ """
195
+ pass
196
+ def save(self, folder, prefix):
197
+ """
198
+ Save the current model
199
+
200
+ Save the current model in the given folder, using the given prefix for the various
201
+ files that will get created.
202
+ Any file with the same name that already exists in this folder will be overwritten.
203
+
204
+ Args:
205
+ folder (:obj:`str`):
206
+ The path to the target folder in which to save the various files
207
+
208
+ prefix (:obj:`str`, `optional`):
209
+ An optional prefix, used to prefix each file name
210
+
211
+ Returns:
212
+ :obj:`List[str]`: The list of saved files
213
+ """
214
+ pass
215
+ def token_to_id(self, tokens):
216
+ """
217
+ Get the ID associated to a token
218
+
219
+ Args:
220
+ token (:obj:`str`):
221
+ A token to convert to an ID
222
+
223
+ Returns:
224
+ :obj:`int`: The ID associated to the token
225
+ """
226
+ pass
227
+ def tokenize(self, sequence):
228
+ """
229
+ Tokenize a sequence
230
+
231
+ Args:
232
+ sequence (:obj:`str`):
233
+ A sequence to tokenize
234
+
235
+ Returns:
236
+ A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
237
+ """
238
+ pass
239
+
240
+ class Unigram(Model):
241
+ """
242
+ An implementation of the Unigram algorithm
243
+
244
+ Args:
245
+ vocab (:obj:`List[Tuple[str, float]]`, `optional`):
246
+ A list of vocabulary items and their relative score [("am", -0.2442),...]
247
+ """
248
+
249
+ def __init__(self, vocab, unk_id, byte_fallback):
250
+ pass
251
+ def get_trainer(self):
252
+ """
253
+ Get the associated :class:`~tokenizers.trainers.Trainer`
254
+
255
+ Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
256
+ :class:`~tokenizers.models.Model`.
257
+
258
+ Returns:
259
+ :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
260
+ """
261
+ pass
262
+ def id_to_token(self, id):
263
+ """
264
+ Get the token associated to an ID
265
+
266
+ Args:
267
+ id (:obj:`int`):
268
+ An ID to convert to a token
269
+
270
+ Returns:
271
+ :obj:`str`: The token associated to the ID
272
+ """
273
+ pass
274
+ def save(self, folder, prefix):
275
+ """
276
+ Save the current model
277
+
278
+ Save the current model in the given folder, using the given prefix for the various
279
+ files that will get created.
280
+ Any file with the same name that already exists in this folder will be overwritten.
281
+
282
+ Args:
283
+ folder (:obj:`str`):
284
+ The path to the target folder in which to save the various files
285
+
286
+ prefix (:obj:`str`, `optional`):
287
+ An optional prefix, used to prefix each file name
288
+
289
+ Returns:
290
+ :obj:`List[str]`: The list of saved files
291
+ """
292
+ pass
293
+ def token_to_id(self, tokens):
294
+ """
295
+ Get the ID associated to a token
296
+
297
+ Args:
298
+ token (:obj:`str`):
299
+ A token to convert to an ID
300
+
301
+ Returns:
302
+ :obj:`int`: The ID associated to the token
303
+ """
304
+ pass
305
+ def tokenize(self, sequence):
306
+ """
307
+ Tokenize a sequence
308
+
309
+ Args:
310
+ sequence (:obj:`str`):
311
+ A sequence to tokenize
312
+
313
+ Returns:
314
+ A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
315
+ """
316
+ pass
317
+
318
+ class WordLevel(Model):
319
+ """
320
+ An implementation of the WordLevel algorithm
321
+
322
+ Most simple tokenizer model based on mapping tokens to their corresponding id.
323
+
324
+ Args:
325
+ vocab (:obj:`Dict[str, int]`, `optional`):
326
+ A dictionary of string keys and their ids :obj:`{"am": 0,...}`
327
+
328
+ unk_token (:obj:`str`, `optional`):
329
+ The unknown token to be used by the model.
330
+ """
331
+
332
+ def __init__(self, vocab, unk_token):
333
+ pass
334
+ @staticmethod
335
+ def from_file(vocab, unk_token):
336
+ """
337
+ Instantiate a WordLevel model from the given file
338
+
339
+ This method is roughly equivalent to doing::
340
+
341
+ vocab = WordLevel.read_file(vocab_filename)
342
+ wordlevel = WordLevel(vocab)
343
+
344
+ If you don't need to keep the :obj:`vocab` values lying around, this method is
345
+ more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
346
+ initialize a :class:`~tokenizers.models.WordLevel`
347
+
348
+ Args:
349
+ vocab (:obj:`str`):
350
+ The path to a :obj:`vocab.json` file
351
+
352
+ Returns:
353
+ :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
354
+ """
355
+ pass
356
+ def get_trainer(self):
357
+ """
358
+ Get the associated :class:`~tokenizers.trainers.Trainer`
359
+
360
+ Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
361
+ :class:`~tokenizers.models.Model`.
362
+
363
+ Returns:
364
+ :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
365
+ """
366
+ pass
367
+ def id_to_token(self, id):
368
+ """
369
+ Get the token associated to an ID
370
+
371
+ Args:
372
+ id (:obj:`int`):
373
+ An ID to convert to a token
374
+
375
+ Returns:
376
+ :obj:`str`: The token associated to the ID
377
+ """
378
+ pass
379
+ @staticmethod
380
+ def read_file(vocab):
381
+ """
382
+ Read a :obj:`vocab.json`
383
+
384
+ This method provides a way to read and parse the content of a vocabulary file,
385
+ returning the relevant data structures. If you want to instantiate some WordLevel models
386
+ from memory, this method gives you the expected input from the standard files.
387
+
388
+ Args:
389
+ vocab (:obj:`str`):
390
+ The path to a :obj:`vocab.json` file
391
+
392
+ Returns:
393
+ :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
394
+ """
395
+ pass
396
+ def save(self, folder, prefix):
397
+ """
398
+ Save the current model
399
+
400
+ Save the current model in the given folder, using the given prefix for the various
401
+ files that will get created.
402
+ Any file with the same name that already exists in this folder will be overwritten.
403
+
404
+ Args:
405
+ folder (:obj:`str`):
406
+ The path to the target folder in which to save the various files
407
+
408
+ prefix (:obj:`str`, `optional`):
409
+ An optional prefix, used to prefix each file name
410
+
411
+ Returns:
412
+ :obj:`List[str]`: The list of saved files
413
+ """
414
+ pass
415
+ def token_to_id(self, tokens):
416
+ """
417
+ Get the ID associated to a token
418
+
419
+ Args:
420
+ token (:obj:`str`):
421
+ A token to convert to an ID
422
+
423
+ Returns:
424
+ :obj:`int`: The ID associated to the token
425
+ """
426
+ pass
427
+ def tokenize(self, sequence):
428
+ """
429
+ Tokenize a sequence
430
+
431
+ Args:
432
+ sequence (:obj:`str`):
433
+ A sequence to tokenize
434
+
435
+ Returns:
436
+ A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
437
+ """
438
+ pass
439
+
440
+ class WordPiece(Model):
441
+ """
442
+ An implementation of the WordPiece algorithm
443
+
444
+ Args:
445
+ vocab (:obj:`Dict[str, int]`, `optional`):
446
+ A dictionary of string keys and their ids :obj:`{"am": 0,...}`
447
+
448
+ unk_token (:obj:`str`, `optional`):
449
+ The unknown token to be used by the model.
450
+
451
+ max_input_chars_per_word (:obj:`int`, `optional`):
452
+ The maximum number of characters to authorize in a single word.
453
+ """
454
+
455
+ def __init__(self, vocab, unk_token, max_input_chars_per_word):
456
+ pass
457
+ @staticmethod
458
+ def from_file(vocab, **kwargs):
459
+ """
460
+ Instantiate a WordPiece model from the given file
461
+
462
+ This method is roughly equivalent to doing::
463
+
464
+ vocab = WordPiece.read_file(vocab_filename)
465
+ wordpiece = WordPiece(vocab)
466
+
467
+ If you don't need to keep the :obj:`vocab` values lying around, this method is
468
+ more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
469
+ initialize a :class:`~tokenizers.models.WordPiece`
470
+
471
+ Args:
472
+ vocab (:obj:`str`):
473
+ The path to a :obj:`vocab.txt` file
474
+
475
+ Returns:
476
+ :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
477
+ """
478
+ pass
479
+ def get_trainer(self):
480
+ """
481
+ Get the associated :class:`~tokenizers.trainers.Trainer`
482
+
483
+ Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
484
+ :class:`~tokenizers.models.Model`.
485
+
486
+ Returns:
487
+ :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
488
+ """
489
+ pass
490
+ def id_to_token(self, id):
491
+ """
492
+ Get the token associated to an ID
493
+
494
+ Args:
495
+ id (:obj:`int`):
496
+ An ID to convert to a token
497
+
498
+ Returns:
499
+ :obj:`str`: The token associated to the ID
500
+ """
501
+ pass
502
+ @staticmethod
503
+ def read_file(vocab):
504
+ """
505
+ Read a :obj:`vocab.txt` file
506
+
507
+ This method provides a way to read and parse the content of a standard `vocab.txt`
508
+ file as used by the WordPiece Model, returning the relevant data structures. If you
509
+ want to instantiate some WordPiece models from memory, this method gives you the
510
+ expected input from the standard files.
511
+
512
+ Args:
513
+ vocab (:obj:`str`):
514
+ The path to a :obj:`vocab.txt` file
515
+
516
+ Returns:
517
+ :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
518
+ """
519
+ pass
520
+ def save(self, folder, prefix):
521
+ """
522
+ Save the current model
523
+
524
+ Save the current model in the given folder, using the given prefix for the various
525
+ files that will get created.
526
+ Any file with the same name that already exists in this folder will be overwritten.
527
+
528
+ Args:
529
+ folder (:obj:`str`):
530
+ The path to the target folder in which to save the various files
531
+
532
+ prefix (:obj:`str`, `optional`):
533
+ An optional prefix, used to prefix each file name
534
+
535
+ Returns:
536
+ :obj:`List[str]`: The list of saved files
537
+ """
538
+ pass
539
+ def token_to_id(self, token):
540
+ """
541
+ Get the ID associated to a token
542
+
543
+ Args:
544
+ token (:obj:`str`):
545
+ A token to convert to an ID
546
+
547
+ Returns:
548
+ :obj:`int`: The ID associated to the token
549
+ """
550
+ pass
551
+ def tokenize(self, sequence):
552
+ """
553
+ Tokenize a sequence
554
+
555
+ Args:
556
+ sequence (:obj:`str`):
557
+ A sequence to tokenize
558
+
559
+ Returns:
560
+ A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
561
+ """
562
+ pass
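As a quick illustration of the model API stubbed above, here is a minimal usage sketch. The `vocab.txt` path, the `[UNK]` token value, and the example word are placeholders chosen for illustration, not values taken from the stub itself.

from tokenizers.models import WordPiece

# Load the vocabulary from a standard vocab.txt file (one token per line).
wordpiece = WordPiece.from_file("vocab.txt", unk_token="[UNK]")

# tokenize() returns a list of tokenizers.Token objects.
tokens = wordpiece.tokenize("unaffable")
print([t.value for t in tokens])        # e.g. ['una', '##ffa', '##ble'], depending on the vocab

# token_to_id() / id_to_token() convert between the two representations.
print(wordpiece.token_to_id("##ble"))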
env-llmeval/lib/python3.10/site-packages/tokenizers/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (296 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ from .. import normalizers
2
+
3
+
4
+ Normalizer = normalizers.Normalizer
5
+ BertNormalizer = normalizers.BertNormalizer
6
+ NFD = normalizers.NFD
7
+ NFKD = normalizers.NFKD
8
+ NFC = normalizers.NFC
9
+ NFKC = normalizers.NFKC
10
+ Sequence = normalizers.Sequence
11
+ Lowercase = normalizers.Lowercase
12
+ Prepend = normalizers.Prepend
13
+ Strip = normalizers.Strip
14
+ StripAccents = normalizers.StripAccents
15
+ Nmt = normalizers.Nmt
16
+ Precompiled = normalizers.Precompiled
17
+ Replace = normalizers.Replace
18
+
19
+
20
+ NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
21
+
22
+
23
+ def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
24
+ if normalizer not in NORMALIZERS:
25
+ raise ValueError(
26
+ "{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
27
+ )
28
+
29
+ return NORMALIZERS[normalizer]()
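A short sketch of how the helper defined above might be used; the input string is just an illustrative example.

from tokenizers.normalizers import unicode_normalizer_from_str

normalizer = unicode_normalizer_from_str("nfkc")   # returns an NFKC() instance
print(normalizer.normalize_str("ﬁve"))             # NFKC decomposes the ligature -> "five"

# An unknown name raises a ValueError listing the available normalizers.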
env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__init__.pyi ADDED
@@ -0,0 +1,583 @@
1
+ # Generated content DO NOT EDIT
2
+ class Normalizer:
3
+ """
4
+ Base class for all normalizers
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of a
7
+ Normalizer will return an instance of this class when instantiated.
8
+ """
9
+
10
+ def normalize(self, normalized):
11
+ """
12
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
13
+
14
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
15
+ keep track of the alignment information. If you just want to see the result
16
+ of the normalization on a raw string, you can use
17
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
18
+
19
+ Args:
20
+ normalized (:class:`~tokenizers.NormalizedString`):
21
+ The normalized string on which to apply this
22
+ :class:`~tokenizers.normalizers.Normalizer`
23
+ """
24
+ pass
25
+ def normalize_str(self, sequence):
26
+ """
27
+ Normalize the given string
28
+
29
+ This method provides a way to visualize the effect of a
30
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
31
+ information. If you need to get/convert offsets, you can use
32
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
33
+
34
+ Args:
35
+ sequence (:obj:`str`):
36
+ A string to normalize
37
+
38
+ Returns:
39
+ :obj:`str`: A string after normalization
40
+ """
41
+ pass
42
+
43
+ class BertNormalizer(Normalizer):
44
+ """
45
+ BertNormalizer
46
+
47
+ Takes care of normalizing raw text before giving it to a Bert model.
48
+ This includes cleaning the text, handling accents, Chinese characters, and lowercasing.
49
+
50
+ Args:
51
+ clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
52
+ Whether to clean the text, by removing any control characters
53
+ and replacing all whitespace characters with a classic space.
54
+
55
+ handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
56
+ Whether to handle Chinese characters by putting spaces around them.
57
+
58
+ strip_accents (:obj:`bool`, `optional`):
59
+ Whether to strip all accents. If this option is not specified (i.e. left as :obj:`None`),
60
+ then it will be determined by the value for `lowercase` (as in the original Bert).
61
+
62
+ lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
63
+ Whether to lowercase.
64
+ """
65
+
66
+ def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
67
+ pass
68
+ def normalize(self, normalized):
69
+ """
70
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
71
+
72
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
73
+ keep track of the alignment information. If you just want to see the result
74
+ of the normalization on a raw string, you can use
75
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
76
+
77
+ Args:
78
+ normalized (:class:`~tokenizers.NormalizedString`):
79
+ The normalized string on which to apply this
80
+ :class:`~tokenizers.normalizers.Normalizer`
81
+ """
82
+ pass
83
+ def normalize_str(self, sequence):
84
+ """
85
+ Normalize the given string
86
+
87
+ This method provides a way to visualize the effect of a
88
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
89
+ information. If you need to get/convert offsets, you can use
90
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
91
+
92
+ Args:
93
+ sequence (:obj:`str`):
94
+ A string to normalize
95
+
96
+ Returns:
97
+ :obj:`str`: A string after normalization
98
+ """
99
+ pass
100
+
101
+ class Lowercase(Normalizer):
102
+ """
103
+ Lowercase Normalizer
104
+ """
105
+
106
+ def __init__(self):
107
+ pass
108
+ def normalize(self, normalized):
109
+ """
110
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
111
+
112
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
113
+ keep track of the alignment information. If you just want to see the result
114
+ of the normalization on a raw string, you can use
115
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
116
+
117
+ Args:
118
+ normalized (:class:`~tokenizers.NormalizedString`):
119
+ The normalized string on which to apply this
120
+ :class:`~tokenizers.normalizers.Normalizer`
121
+ """
122
+ pass
123
+ def normalize_str(self, sequence):
124
+ """
125
+ Normalize the given string
126
+
127
+ This method provides a way to visualize the effect of a
128
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
129
+ information. If you need to get/convert offsets, you can use
130
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
131
+
132
+ Args:
133
+ sequence (:obj:`str`):
134
+ A string to normalize
135
+
136
+ Returns:
137
+ :obj:`str`: A string after normalization
138
+ """
139
+ pass
140
+
141
+ class NFC(Normalizer):
142
+ """
143
+ NFC Unicode Normalizer
144
+ """
145
+
146
+ def __init__(self):
147
+ pass
148
+ def normalize(self, normalized):
149
+ """
150
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
151
+
152
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
153
+ keep track of the alignment information. If you just want to see the result
154
+ of the normalization on a raw string, you can use
155
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
156
+
157
+ Args:
158
+ normalized (:class:`~tokenizers.NormalizedString`):
159
+ The normalized string on which to apply this
160
+ :class:`~tokenizers.normalizers.Normalizer`
161
+ """
162
+ pass
163
+ def normalize_str(self, sequence):
164
+ """
165
+ Normalize the given string
166
+
167
+ This method provides a way to visualize the effect of a
168
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
169
+ information. If you need to get/convert offsets, you can use
170
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
171
+
172
+ Args:
173
+ sequence (:obj:`str`):
174
+ A string to normalize
175
+
176
+ Returns:
177
+ :obj:`str`: A string after normalization
178
+ """
179
+ pass
180
+
181
+ class NFD(Normalizer):
182
+ """
183
+ NFD Unicode Normalizer
184
+ """
185
+
186
+ def __init__(self):
187
+ pass
188
+ def normalize(self, normalized):
189
+ """
190
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
191
+
192
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
193
+ keep track of the alignment information. If you just want to see the result
194
+ of the normalization on a raw string, you can use
195
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
196
+
197
+ Args:
198
+ normalized (:class:`~tokenizers.NormalizedString`):
199
+ The normalized string on which to apply this
200
+ :class:`~tokenizers.normalizers.Normalizer`
201
+ """
202
+ pass
203
+ def normalize_str(self, sequence):
204
+ """
205
+ Normalize the given string
206
+
207
+ This method provides a way to visualize the effect of a
208
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
209
+ information. If you need to get/convert offsets, you can use
210
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
211
+
212
+ Args:
213
+ sequence (:obj:`str`):
214
+ A string to normalize
215
+
216
+ Returns:
217
+ :obj:`str`: A string after normalization
218
+ """
219
+ pass
220
+
221
+ class NFKC(Normalizer):
222
+ """
223
+ NFKC Unicode Normalizer
224
+ """
225
+
226
+ def __init__(self):
227
+ pass
228
+ def normalize(self, normalized):
229
+ """
230
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
231
+
232
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
233
+ keep track of the alignment information. If you just want to see the result
234
+ of the normalization on a raw string, you can use
235
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
236
+
237
+ Args:
238
+ normalized (:class:`~tokenizers.NormalizedString`):
239
+ The normalized string on which to apply this
240
+ :class:`~tokenizers.normalizers.Normalizer`
241
+ """
242
+ pass
243
+ def normalize_str(self, sequence):
244
+ """
245
+ Normalize the given string
246
+
247
+ This method provides a way to visualize the effect of a
248
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
249
+ information. If you need to get/convert offsets, you can use
250
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
251
+
252
+ Args:
253
+ sequence (:obj:`str`):
254
+ A string to normalize
255
+
256
+ Returns:
257
+ :obj:`str`: A string after normalization
258
+ """
259
+ pass
260
+
261
+ class NFKD(Normalizer):
262
+ """
263
+ NFKD Unicode Normalizer
264
+ """
265
+
266
+ def __init__(self):
267
+ pass
268
+ def normalize(self, normalized):
269
+ """
270
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
271
+
272
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
273
+ keep track of the alignment information. If you just want to see the result
274
+ of the normalization on a raw string, you can use
275
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
276
+
277
+ Args:
278
+ normalized (:class:`~tokenizers.NormalizedString`):
279
+ The normalized string on which to apply this
280
+ :class:`~tokenizers.normalizers.Normalizer`
281
+ """
282
+ pass
283
+ def normalize_str(self, sequence):
284
+ """
285
+ Normalize the given string
286
+
287
+ This method provides a way to visualize the effect of a
288
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
289
+ information. If you need to get/convert offsets, you can use
290
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
291
+
292
+ Args:
293
+ sequence (:obj:`str`):
294
+ A string to normalize
295
+
296
+ Returns:
297
+ :obj:`str`: A string after normalization
298
+ """
299
+ pass
300
+
301
+ class Nmt(Normalizer):
302
+ """
303
+ Nmt normalizer
304
+ """
305
+
306
+ def __init__(self):
307
+ pass
308
+ def normalize(self, normalized):
309
+ """
310
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
311
+
312
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
313
+ keep track of the alignment information. If you just want to see the result
314
+ of the normalization on a raw string, you can use
315
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
316
+
317
+ Args:
318
+ normalized (:class:`~tokenizers.NormalizedString`):
319
+ The normalized string on which to apply this
320
+ :class:`~tokenizers.normalizers.Normalizer`
321
+ """
322
+ pass
323
+ def normalize_str(self, sequence):
324
+ """
325
+ Normalize the given string
326
+
327
+ This method provides a way to visualize the effect of a
328
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
329
+ information. If you need to get/convert offsets, you can use
330
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
331
+
332
+ Args:
333
+ sequence (:obj:`str`):
334
+ A string to normalize
335
+
336
+ Returns:
337
+ :obj:`str`: A string after normalization
338
+ """
339
+ pass
340
+
341
+ class Precompiled(Normalizer):
342
+ """
343
+ Precompiled normalizer
344
+ Don't use this manually; it is used for compatibility with SentencePiece.
345
+ """
346
+
347
+ def __init__(self, precompiled_charsmap):
348
+ pass
349
+ def normalize(self, normalized):
350
+ """
351
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
352
+
353
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
354
+ keep track of the alignment information. If you just want to see the result
355
+ of the normalization on a raw string, you can use
356
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
357
+
358
+ Args:
359
+ normalized (:class:`~tokenizers.NormalizedString`):
360
+ The normalized string on which to apply this
361
+ :class:`~tokenizers.normalizers.Normalizer`
362
+ """
363
+ pass
364
+ def normalize_str(self, sequence):
365
+ """
366
+ Normalize the given string
367
+
368
+ This method provides a way to visualize the effect of a
369
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
370
+ information. If you need to get/convert offsets, you can use
371
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
372
+
373
+ Args:
374
+ sequence (:obj:`str`):
375
+ A string to normalize
376
+
377
+ Returns:
378
+ :obj:`str`: A string after normalization
379
+ """
380
+ pass
381
+
382
+ class Prepend(Normalizer):
383
+ """
384
+ Prepend normalizer
385
+ """
386
+
387
+ def __init__(self, prepend):
388
+ pass
389
+ def normalize(self, normalized):
390
+ """
391
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
392
+
393
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
394
+ keep track of the alignment information. If you just want to see the result
395
+ of the normalization on a raw string, you can use
396
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
397
+
398
+ Args:
399
+ normalized (:class:`~tokenizers.NormalizedString`):
400
+ The normalized string on which to apply this
401
+ :class:`~tokenizers.normalizers.Normalizer`
402
+ """
403
+ pass
404
+ def normalize_str(self, sequence):
405
+ """
406
+ Normalize the given string
407
+
408
+ This method provides a way to visualize the effect of a
409
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
410
+ information. If you need to get/convert offsets, you can use
411
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
412
+
413
+ Args:
414
+ sequence (:obj:`str`):
415
+ A string to normalize
416
+
417
+ Returns:
418
+ :obj:`str`: A string after normalization
419
+ """
420
+ pass
421
+
422
+ class Replace(Normalizer):
423
+ """
424
+ Replace normalizer
425
+ """
426
+
427
+ def __init__(self, pattern, content):
428
+ pass
429
+ def normalize(self, normalized):
430
+ """
431
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
432
+
433
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
434
+ keep track of the alignment information. If you just want to see the result
435
+ of the normalization on a raw string, you can use
436
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
437
+
438
+ Args:
439
+ normalized (:class:`~tokenizers.NormalizedString`):
440
+ The normalized string on which to apply this
441
+ :class:`~tokenizers.normalizers.Normalizer`
442
+ """
443
+ pass
444
+ def normalize_str(self, sequence):
445
+ """
446
+ Normalize the given string
447
+
448
+ This method provides a way to visualize the effect of a
449
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
450
+ information. If you need to get/convert offsets, you can use
451
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
452
+
453
+ Args:
454
+ sequence (:obj:`str`):
455
+ A string to normalize
456
+
457
+ Returns:
458
+ :obj:`str`: A string after normalization
459
+ """
460
+ pass
461
+
462
+ class Sequence(Normalizer):
463
+ """
464
+ Allows concatenating multiple other Normalizers as a Sequence.
465
+ All the normalizers run in sequence in the given order
466
+
467
+ Args:
468
+ normalizers (:obj:`List[Normalizer]`):
469
+ A list of Normalizers to be run as a sequence
470
+ """
471
+
472
+ def normalize(self, normalized):
473
+ """
474
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
475
+
476
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
477
+ keep track of the alignment information. If you just want to see the result
478
+ of the normalization on a raw string, you can use
479
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
480
+
481
+ Args:
482
+ normalized (:class:`~tokenizers.NormalizedString`):
483
+ The normalized string on which to apply this
484
+ :class:`~tokenizers.normalizers.Normalizer`
485
+ """
486
+ pass
487
+ def normalize_str(self, sequence):
488
+ """
489
+ Normalize the given string
490
+
491
+ This method provides a way to visualize the effect of a
492
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
493
+ information. If you need to get/convert offsets, you can use
494
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
495
+
496
+ Args:
497
+ sequence (:obj:`str`):
498
+ A string to normalize
499
+
500
+ Returns:
501
+ :obj:`str`: A string after normalization
502
+ """
503
+ pass
504
+
505
+ class Strip(Normalizer):
506
+ """
507
+ Strip normalizer
508
+ """
509
+
510
+ def __init__(self, left=True, right=True):
511
+ pass
512
+ def normalize(self, normalized):
513
+ """
514
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
515
+
516
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
517
+ keep track of the alignment information. If you just want to see the result
518
+ of the normalization on a raw string, you can use
519
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
520
+
521
+ Args:
522
+ normalized (:class:`~tokenizers.NormalizedString`):
523
+ The normalized string on which to apply this
524
+ :class:`~tokenizers.normalizers.Normalizer`
525
+ """
526
+ pass
527
+ def normalize_str(self, sequence):
528
+ """
529
+ Normalize the given string
530
+
531
+ This method provides a way to visualize the effect of a
532
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
533
+ information. If you need to get/convert offsets, you can use
534
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
535
+
536
+ Args:
537
+ sequence (:obj:`str`):
538
+ A string to normalize
539
+
540
+ Returns:
541
+ :obj:`str`: A string after normalization
542
+ """
543
+ pass
544
+
545
+ class StripAccents(Normalizer):
546
+ """
547
+ StripAccents normalizer
548
+ """
549
+
550
+ def __init__(self):
551
+ pass
552
+ def normalize(self, normalized):
553
+ """
554
+ Normalize a :class:`~tokenizers.NormalizedString` in-place
555
+
556
+ This method allows to modify a :class:`~tokenizers.NormalizedString` to
557
+ keep track of the alignment information. If you just want to see the result
558
+ of the normalization on a raw string, you can use
559
+ :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
560
+
561
+ Args:
562
+ normalized (:class:`~tokenizers.NormalizedString`):
563
+ The normalized string on which to apply this
564
+ :class:`~tokenizers.normalizers.Normalizer`
565
+ """
566
+ pass
567
+ def normalize_str(self, sequence):
568
+ """
569
+ Normalize the given string
570
+
571
+ This method provides a way to visualize the effect of a
572
+ :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
573
+ information. If you need to get/convert offsets, you can use
574
+ :meth:`~tokenizers.normalizers.Normalizer.normalize`
575
+
576
+ Args:
577
+ sequence (:obj:`str`):
578
+ A string to normalize
579
+
580
+ Returns:
581
+ :obj:`str`: A string after normalization
582
+ """
583
+ pass
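To make the interface above concrete, the sketch below chains a few normalizers with `Sequence` and inspects the result with `normalize_str`; the sample text is arbitrary.

from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents, Lowercase

normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])

# normalize_str() only shows the effect; it does not track alignment information.
print(normalizer.normalize_str("Héllò  Wörld"))   # -> "hello  world"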
env-llmeval/lib/python3.10/site-packages/tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (801 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # Generated content DO NOT EDIT
2
+ from .. import pre_tokenizers
3
+
4
+ PreTokenizer = pre_tokenizers.PreTokenizer
5
+ BertPreTokenizer = pre_tokenizers.BertPreTokenizer
6
+ ByteLevel = pre_tokenizers.ByteLevel
7
+ CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
8
+ Digits = pre_tokenizers.Digits
9
+ Metaspace = pre_tokenizers.Metaspace
10
+ Punctuation = pre_tokenizers.Punctuation
11
+ Sequence = pre_tokenizers.Sequence
12
+ Split = pre_tokenizers.Split
13
+ UnicodeScripts = pre_tokenizers.UnicodeScripts
14
+ Whitespace = pre_tokenizers.Whitespace
15
+ WhitespaceSplit = pre_tokenizers.WhitespaceSplit
env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__init__.pyi ADDED
@@ -0,0 +1,593 @@
1
+ # Generated content DO NOT EDIT
2
+ class PreTokenizer:
3
+ """
4
+ Base class for all pre-tokenizers
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of a
7
+ PreTokenizer will return an instance of this class when instantiated.
8
+ """
9
+
10
+ def pre_tokenize(self, pretok):
11
+ """
12
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
13
+
14
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
15
+ keep track of the pre-tokenization, and leverage the capabilities of the
16
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
17
+ the pre-tokenization of a raw string, you can use
18
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
19
+
20
+ Args:
21
+ pretok (:class:`~tokenizers.PreTokenizedString):
22
+ The pre-tokenized string on which to apply this
23
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
24
+ """
25
+ pass
26
+ def pre_tokenize_str(self, sequence):
27
+ """
28
+ Pre tokenize the given string
29
+
30
+ This method provides a way to visualize the effect of a
31
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
32
+ alignment, nor does it provide all the capabilities of the
33
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
34
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
35
+
36
+ Args:
37
+ sequence (:obj:`str`):
38
+ A string to pre-tokenize
39
+
40
+ Returns:
41
+ :obj:`List[Tuple[str, Offsets]]`:
42
+ A list of tuples with the pre-tokenized parts and their offsets
43
+ """
44
+ pass
45
+
46
+ class BertPreTokenizer(PreTokenizer):
47
+ """
48
+ BertPreTokenizer
49
+
50
+ This pre-tokenizer splits tokens on spaces, and also on punctuation.
51
+ Each occurrence of a punctuation character will be treated separately.
52
+ """
53
+
54
+ def __init__(self):
55
+ pass
56
+ def pre_tokenize(self, pretok):
57
+ """
58
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
59
+
60
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
61
+ keep track of the pre-tokenization, and leverage the capabilities of the
62
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
63
+ the pre-tokenization of a raw string, you can use
64
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
65
+
66
+ Args:
67
+ pretok (:class:`~tokenizers.PreTokenizedString):
68
+ The pre-tokenized string on which to apply this
69
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
70
+ """
71
+ pass
72
+ def pre_tokenize_str(self, sequence):
73
+ """
74
+ Pre tokenize the given string
75
+
76
+ This method provides a way to visualize the effect of a
77
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
78
+ alignment, nor does it provide all the capabilities of the
79
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
80
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
81
+
82
+ Args:
83
+ sequence (:obj:`str`):
84
+ A string to pre-tokenize
85
+
86
+ Returns:
87
+ :obj:`List[Tuple[str, Offsets]]`:
88
+ A list of tuples with the pre-tokenized parts and their offsets
89
+ """
90
+ pass
91
+
92
+ class ByteLevel(PreTokenizer):
93
+ """
94
+ ByteLevel PreTokenizer
95
+
96
+ This pre-tokenizer takes care of replacing all bytes of the given string
97
+ with a corresponding representation, as well as splitting into words.
98
+
99
+ Args:
100
+ add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
101
+ Whether to add a space to the first word if there isn't already one. This
102
+ lets us treat `hello` exactly like `say hello`.
103
+ use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
104
+ Set this to :obj:`False` to prevent this `pre_tokenizer` from using
105
+ the GPT2 specific regexp for spliting on whitespace.
106
+ """
107
+
108
+ def __init__(self, add_prefix_space=True, use_regex=True):
109
+ pass
110
+ @staticmethod
111
+ def alphabet():
112
+ """
113
+ Returns the alphabet used by this PreTokenizer.
114
+
115
+ Since the ByteLevel works as its name suggests, at the byte level, it
116
+ encodes each byte value to a unique visible character. This means that there is a
117
+ total of 256 different characters composing this alphabet.
118
+
119
+ Returns:
120
+ :obj:`List[str]`: A list of characters that compose the alphabet
121
+ """
122
+ pass
123
+ def pre_tokenize(self, pretok):
124
+ """
125
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
126
+
127
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
128
+ keep track of the pre-tokenization, and leverage the capabilities of the
129
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
130
+ the pre-tokenization of a raw string, you can use
131
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
132
+
133
+ Args:
134
+ pretok (:class:`~tokenizers.PreTokenizedString):
135
+ The pre-tokenized string on which to apply this
136
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
137
+ """
138
+ pass
139
+ def pre_tokenize_str(self, sequence):
140
+ """
141
+ Pre tokenize the given string
142
+
143
+ This method provides a way to visualize the effect of a
144
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
145
+ alignment, nor does it provide all the capabilities of the
146
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
147
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
148
+
149
+ Args:
150
+ sequence (:obj:`str`):
151
+ A string to pre-tokeize
152
+
153
+ Returns:
154
+ :obj:`List[Tuple[str, Offsets]]`:
155
+ A list of tuple with the pre-tokenized parts and their offsets
156
+ """
157
+ pass
158
+
159
+ class CharDelimiterSplit(PreTokenizer):
160
+ """
161
+ This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
162
+
163
+ Args:
164
+ delimiter: str:
165
+ The delimiter char that will be used to split input
166
+ """
167
+
168
+ def pre_tokenize(self, pretok):
169
+ """
170
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
171
+
172
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
173
+ keep track of the pre-tokenization, and leverage the capabilities of the
174
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
175
+ the pre-tokenization of a raw string, you can use
176
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
177
+
178
+ Args:
179
+ pretok (:class:`~tokenizers.PreTokenizedString):
180
+ The pre-tokenized string on which to apply this
181
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
182
+ """
183
+ pass
184
+ def pre_tokenize_str(self, sequence):
185
+ """
186
+ Pre tokenize the given string
187
+
188
+ This method provides a way to visualize the effect of a
189
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
190
+ alignment, nor does it provide all the capabilities of the
191
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
192
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
193
+
194
+ Args:
195
+ sequence (:obj:`str`):
196
+ A string to pre-tokeize
197
+
198
+ Returns:
199
+ :obj:`List[Tuple[str, Offsets]]`:
200
+ A list of tuple with the pre-tokenized parts and their offsets
201
+ """
202
+ pass
203
+
204
+ class Digits(PreTokenizer):
205
+ """
206
+ This pre-tokenizer simply splits digits into separate tokens
207
+
208
+ Args:
209
+ individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
210
+ If set to True, digits will each be separated as follows::
211
+
212
+ "Call 123 please" -> "Call ", "1", "2", "3", " please"
213
+
214
+ If set to False, digits will be grouped as follows::
215
+
216
+ "Call 123 please" -> "Call ", "123", " please"
217
+ """
218
+
219
+ def __init__(self, individual_digits=False):
220
+ pass
221
+ def pre_tokenize(self, pretok):
222
+ """
223
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
224
+
225
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
226
+ keep track of the pre-tokenization, and leverage the capabilities of the
227
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
228
+ the pre-tokenization of a raw string, you can use
229
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
230
+
231
+ Args:
232
+ pretok (:class:`~tokenizers.PreTokenizedString):
233
+ The pre-tokenized string on which to apply this
234
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
235
+ """
236
+ pass
237
+ def pre_tokenize_str(self, sequence):
238
+ """
239
+ Pre tokenize the given string
240
+
241
+ This method provides a way to visualize the effect of a
242
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
243
+ alignment, nor does it provide all the capabilities of the
244
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
245
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
246
+
247
+ Args:
248
+ sequence (:obj:`str`):
249
+ A string to pre-tokeize
250
+
251
+ Returns:
252
+ :obj:`List[Tuple[str, Offsets]]`:
253
+ A list of tuple with the pre-tokenized parts and their offsets
254
+ """
255
+ pass
256
+
257
+ class Metaspace(PreTokenizer):
258
+ """
259
+ Metaspace pre-tokenizer
260
+
261
+ This pre-tokenizer replaces any whitespace by the provided replacement character.
262
+ It then tries to split on these spaces.
263
+
264
+ Args:
265
+ replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
266
+ The replacement character. Must be exactly one character. By default we
267
+ use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
268
+
269
+ add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
270
+ Whether to add a space to the first word if there isn't already one. This
271
+ lets us treat `hello` exactly like `say hello`.
272
+ """
273
+
274
+ def __init__(self, replacement="▁", add_prefix_space=True):
275
+ pass
276
+ def pre_tokenize(self, pretok):
277
+ """
278
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
279
+
280
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
281
+ keep track of the pre-tokenization, and leverage the capabilities of the
282
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
283
+ the pre-tokenization of a raw string, you can use
284
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
285
+
286
+ Args:
287
+ pretok (:class:`~tokenizers.PreTokenizedString):
288
+ The pre-tokenized string on which to apply this
289
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
290
+ """
291
+ pass
292
+ def pre_tokenize_str(self, sequence):
293
+ """
294
+ Pre tokenize the given string
295
+
296
+ This method provides a way to visualize the effect of a
297
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
298
+ alignment, nor does it provide all the capabilities of the
299
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
300
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
301
+
302
+ Args:
303
+ sequence (:obj:`str`):
304
+ A string to pre-tokeize
305
+
306
+ Returns:
307
+ :obj:`List[Tuple[str, Offsets]]`:
308
+ A list of tuple with the pre-tokenized parts and their offsets
309
+ """
310
+ pass
311
+
312
+ class Punctuation(PreTokenizer):
313
+ """
314
+ This pre-tokenizer simply splits on punctuation as individual characters.
315
+
316
+ Args:
317
+ behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
318
+ The behavior to use when splitting.
319
+ Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
320
+ "contiguous"
321
+ """
322
+
323
+ def __init__(self, behavior="isolated"):
324
+ pass
325
+ def pre_tokenize(self, pretok):
326
+ """
327
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
328
+
329
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
330
+ keep track of the pre-tokenization, and leverage the capabilities of the
331
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
332
+ the pre-tokenization of a raw string, you can use
333
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
334
+
335
+ Args:
336
+ pretok (:class:`~tokenizers.PreTokenizedString):
337
+ The pre-tokenized string on which to apply this
338
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
339
+ """
340
+ pass
341
+ def pre_tokenize_str(self, sequence):
342
+ """
343
+ Pre tokenize the given string
344
+
345
+ This method provides a way to visualize the effect of a
346
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
347
+ alignment, nor does it provide all the capabilities of the
348
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
349
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
350
+
351
+ Args:
352
+ sequence (:obj:`str`):
353
+ A string to pre-tokeize
354
+
355
+ Returns:
356
+ :obj:`List[Tuple[str, Offsets]]`:
357
+ A list of tuple with the pre-tokenized parts and their offsets
358
+ """
359
+ pass
360
+
361
+ class Sequence(PreTokenizer):
362
+ """
363
+ This pre-tokenizer composes other pre_tokenizers and applies them in sequence
364
+ """
365
+
366
+ def __init__(self, pretokenizers):
367
+ pass
368
+ def pre_tokenize(self, pretok):
369
+ """
370
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
371
+
372
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
373
+ keep track of the pre-tokenization, and leverage the capabilities of the
374
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
375
+ the pre-tokenization of a raw string, you can use
376
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
377
+
378
+ Args:
379
+ pretok (:class:`~tokenizers.PreTokenizedString):
380
+ The pre-tokenized string on which to apply this
381
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
382
+ """
383
+ pass
384
+ def pre_tokenize_str(self, sequence):
385
+ """
386
+ Pre tokenize the given string
387
+
388
+ This method provides a way to visualize the effect of a
389
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
390
+ alignment, nor does it provide all the capabilities of the
391
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
392
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
393
+
394
+ Args:
395
+ sequence (:obj:`str`):
396
+ A string to pre-tokeize
397
+
398
+ Returns:
399
+ :obj:`List[Tuple[str, Offsets]]`:
400
+ A list of tuple with the pre-tokenized parts and their offsets
401
+ """
402
+ pass
403
+
404
+ class Split(PreTokenizer):
405
+ """
406
+ Split PreTokenizer
407
+
408
+ This versatile pre-tokenizer splits using the provided pattern and
409
+ according to the provided behavior. The pattern can be inverted by
410
+ making use of the invert flag.
411
+
412
+ Args:
413
+ pattern (:obj:`str` or :class:`~tokenizers.Regex`):
414
+ A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
415
+
416
+ behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
417
+ The behavior to use when splitting.
418
+ Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
419
+ "contiguous"
420
+
421
+ invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
422
+ Whether to invert the pattern.
423
+ """
424
+
425
+ def __init__(self, pattern, behavior, invert=False):
426
+ pass
427
+ def pre_tokenize(self, pretok):
428
+ """
429
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
430
+
431
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
432
+ keep track of the pre-tokenization, and leverage the capabilities of the
433
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
434
+ the pre-tokenization of a raw string, you can use
435
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
436
+
437
+ Args:
438
+ pretok (:class:`~tokenizers.PreTokenizedString):
439
+ The pre-tokenized string on which to apply this
440
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
441
+ """
442
+ pass
443
+ def pre_tokenize_str(self, sequence):
444
+ """
445
+ Pre tokenize the given string
446
+
447
+ This method provides a way to visualize the effect of a
448
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
449
+ alignment, nor does it provide all the capabilities of the
450
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
451
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
452
+
453
+ Args:
454
+ sequence (:obj:`str`):
455
+ A string to pre-tokeize
456
+
457
+ Returns:
458
+ :obj:`List[Tuple[str, Offsets]]`:
459
+ A list of tuple with the pre-tokenized parts and their offsets
460
+ """
461
+ pass
462
+
463
+ class UnicodeScripts(PreTokenizer):
464
+ """
465
+ This pre-tokenizer splits on characters that belong to different language families.
466
+ It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
467
+ Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
468
+ This mimics the SentencePiece Unigram implementation.
469
+ """
470
+
471
+ def __init__(self):
472
+ pass
473
+ def pre_tokenize(self, pretok):
474
+ """
475
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
476
+
477
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
478
+ keep track of the pre-tokenization, and leverage the capabilities of the
479
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
480
+ the pre-tokenization of a raw string, you can use
481
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
482
+
483
+ Args:
484
+ pretok (:class:`~tokenizers.PreTokenizedString):
485
+ The pre-tokenized string on which to apply this
486
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
487
+ """
488
+ pass
489
+ def pre_tokenize_str(self, sequence):
490
+ """
491
+ Pre tokenize the given string
492
+
493
+ This method provides a way to visualize the effect of a
494
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
495
+ alignment, nor does it provide all the capabilities of the
496
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
497
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
498
+
499
+ Args:
500
+ sequence (:obj:`str`):
501
+ A string to pre-tokeize
502
+
503
+ Returns:
504
+ :obj:`List[Tuple[str, Offsets]]`:
505
+ A list of tuple with the pre-tokenized parts and their offsets
506
+ """
507
+ pass
508
+
509
+ class Whitespace(PreTokenizer):
510
+ """
511
+ This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
512
+ """
513
+
514
+ def __init__(self):
515
+ pass
516
+ def pre_tokenize(self, pretok):
517
+ """
518
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
519
+
520
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
521
+ keep track of the pre-tokenization, and leverage the capabilities of the
522
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
523
+ the pre-tokenization of a raw string, you can use
524
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
525
+
526
+ Args:
527
+ pretok (:class:`~tokenizers.PreTokenizedString):
528
+ The pre-tokenized string on which to apply this
529
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
530
+ """
531
+ pass
532
+ def pre_tokenize_str(self, sequence):
533
+ """
534
+ Pre tokenize the given string
535
+
536
+ This method provides a way to visualize the effect of a
537
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
538
+ alignment, nor does it provide all the capabilities of the
539
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
540
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
541
+
542
+ Args:
543
+ sequence (:obj:`str`):
544
+ A string to pre-tokeize
545
+
546
+ Returns:
547
+ :obj:`List[Tuple[str, Offsets]]`:
548
+ A list of tuple with the pre-tokenized parts and their offsets
549
+ """
550
+ pass
551
+
552
+ class WhitespaceSplit(PreTokenizer):
553
+ """
554
+ This pre-tokenizer simply splits on whitespace. Works like `.split()`
555
+ """
556
+
557
+ def __init__(self):
558
+ pass
559
+ def pre_tokenize(self, pretok):
560
+ """
561
+ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
562
+
563
+ This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
564
+ keep track of the pre-tokenization, and leverage the capabilities of the
565
+ :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
566
+ the pre-tokenization of a raw string, you can use
567
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
568
+
569
+ Args:
570
+ pretok (:class:`~tokenizers.PreTokenizedString):
571
+ The pre-tokenized string on which to apply this
572
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`
573
+ """
574
+ pass
575
+ def pre_tokenize_str(self, sequence):
576
+ """
577
+ Pre tokenize the given string
578
+
579
+ This method provides a way to visualize the effect of a
580
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
581
+ alignment, nor does it provide all the capabilities of the
582
+ :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
583
+ :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
584
+
585
+ Args:
586
+ sequence (:obj:`str`):
587
+ A string to pre-tokeize
588
+
589
+ Returns:
590
+ :obj:`List[Tuple[str, Offsets]]`:
591
+ A list of tuple with the pre-tokenized parts and their offsets
592
+ """
593
+ pass
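To make the `pre_tokenize_str` return type documented above concrete, here is a small sketch; the sample sentence is arbitrary.

from tokenizers.pre_tokenizers import Whitespace, Digits, Sequence

# Compose two pre-tokenizers: split on whitespace, then split digits individually.
pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])

# Each element is a (piece, (start, end)) tuple with offsets into the original string.
print(pre_tokenizer.pre_tokenize_str("Call 123 please"))
# [('Call', (0, 4)), ('1', (5, 6)), ('2', (6, 7)), ('3', (7, 8)), ('please', (9, 15))]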
env-llmeval/lib/python3.10/site-packages/tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (482 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Generated content DO NOT EDIT
2
+ from .. import processors
3
+
4
+ PostProcessor = processors.PostProcessor
5
+ BertProcessing = processors.BertProcessing
6
+ ByteLevel = processors.ByteLevel
7
+ RobertaProcessing = processors.RobertaProcessing
8
+ Sequence = processors.Sequence
9
+ TemplateProcessing = processors.TemplateProcessing
env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__init__.pyi ADDED
@@ -0,0 +1,337 @@
1
+ # Generated content DO NOT EDIT
2
+ class PostProcessor:
3
+ """
4
+ Base class for all post-processors
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of
7
+ a PostProcessor will return an instance of this class when instantiated.
8
+ """
9
+
10
+ def num_special_tokens_to_add(self, is_pair):
11
+ """
12
+ Return the number of special tokens that would be added for single/pair sentences.
13
+
14
+ Args:
15
+ is_pair (:obj:`bool`):
16
+ Whether the input would be a pair of sequences
17
+
18
+ Returns:
19
+ :obj:`int`: The number of tokens to add
20
+ """
21
+ pass
22
+ def process(self, encoding, pair=None, add_special_tokens=True):
23
+ """
24
+ Post-process the given encodings, generating the final one
25
+
26
+ Args:
27
+ encoding (:class:`~tokenizers.Encoding`):
28
+ The encoding for the first sequence
29
+
30
+ pair (:class:`~tokenizers.Encoding`, `optional`):
31
+ The encoding for the pair sequence
32
+
33
+ add_special_tokens (:obj:`bool`):
34
+ Whether to add the special tokens
35
+
36
+ Return:
37
+ :class:`~tokenizers.Encoding`: The final encoding
38
+ """
39
+ pass
40
+
41
+ class BertProcessing(PostProcessor):
42
+ """
43
+ This post-processor takes care of adding the special tokens needed by
44
+ a Bert model:
45
+
46
+ - a SEP token
47
+ - a CLS token
48
+
49
+ Args:
50
+ sep (:obj:`Tuple[str, int]`):
51
+ A tuple with the string representation of the SEP token, and its id
52
+
53
+ cls (:obj:`Tuple[str, int]`):
54
+ A tuple with the string representation of the CLS token, and its id
55
+ """
56
+
57
+ def __init__(self, sep, cls):
58
+ pass
59
+ def num_special_tokens_to_add(self, is_pair):
60
+ """
61
+ Return the number of special tokens that would be added for single/pair sentences.
62
+
63
+ Args:
64
+ is_pair (:obj:`bool`):
65
+ Whether the input would be a pair of sequences
66
+
67
+ Returns:
68
+ :obj:`int`: The number of tokens to add
69
+ """
70
+ pass
71
+ def process(self, encoding, pair=None, add_special_tokens=True):
72
+ """
73
+ Post-process the given encodings, generating the final one
74
+
75
+ Args:
76
+ encoding (:class:`~tokenizers.Encoding`):
77
+ The encoding for the first sequence
78
+
79
+ pair (:class:`~tokenizers.Encoding`, `optional`):
80
+ The encoding for the pair sequence
81
+
82
+ add_special_tokens (:obj:`bool`):
83
+ Whether to add the special tokens
84
+
85
+ Return:
86
+ :class:`~tokenizers.Encoding`: The final encoding
87
+ """
88
+ pass
89
+
90
+ class ByteLevel(PostProcessor):
91
+ """
92
+ This post-processor takes care of trimming the offsets.
93
+
94
+ By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
95
+ want the offsets to include these whitespaces, then this PostProcessor must be used.
96
+
97
+ Args:
98
+ trim_offsets (:obj:`bool`):
99
+ Whether to trim the whitespaces from the produced offsets.
100
+ """
101
+
102
+ def __init__(self, trim_offsets=True):
103
+ pass
104
+ def num_special_tokens_to_add(self, is_pair):
105
+ """
106
+ Return the number of special tokens that would be added for single/pair sentences.
107
+
108
+ Args:
109
+ is_pair (:obj:`bool`):
110
+ Whether the input would be a pair of sequences
111
+
112
+ Returns:
113
+ :obj:`int`: The number of tokens to add
114
+ """
115
+ pass
116
+ def process(self, encoding, pair=None, add_special_tokens=True):
117
+ """
118
+ Post-process the given encodings, generating the final one
119
+
120
+ Args:
121
+ encoding (:class:`~tokenizers.Encoding`):
122
+ The encoding for the first sequence
123
+
124
+ pair (:class:`~tokenizers.Encoding`, `optional`):
125
+ The encoding for the pair sequence
126
+
127
+ add_special_tokens (:obj:`bool`):
128
+ Whether to add the special tokens
129
+
130
+ Return:
131
+ :class:`~tokenizers.Encoding`: The final encoding
132
+ """
133
+ pass
134
+
135
+ class RobertaProcessing(PostProcessor):
136
+ """
137
+ This post-processor takes care of adding the special tokens needed by
138
+ a Roberta model:
139
+
140
+ - a SEP token
141
+ - a CLS token
142
+
143
+ It also takes care of trimming the offsets.
144
+ By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
145
+ want the offsets to include these whitespaces, then this PostProcessor should be initialized
146
+ with :obj:`trim_offsets=True`
147
+
148
+ Args:
149
+ sep (:obj:`Tuple[str, int]`):
150
+ A tuple with the string representation of the SEP token, and its id
151
+
152
+ cls (:obj:`Tuple[str, int]`):
153
+ A tuple with the string representation of the CLS token, and its id
154
+
155
+ trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
156
+ Whether to trim the whitespaces from the produced offsets.
157
+
158
+ add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
159
+ Whether the add_prefix_space option was enabled during pre-tokenization. This
160
+ is relevant because it defines the way the offsets are trimmed out.
161
+ """
162
+
163
+ def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
164
+ pass
165
+ def num_special_tokens_to_add(self, is_pair):
166
+ """
167
+ Return the number of special tokens that would be added for single/pair sentences.
168
+
169
+ Args:
170
+ is_pair (:obj:`bool`):
171
+ Whether the input would be a pair of sequences
172
+
173
+ Returns:
174
+ :obj:`int`: The number of tokens to add
175
+ """
176
+ pass
177
+ def process(self, encoding, pair=None, add_special_tokens=True):
178
+ """
179
+ Post-process the given encodings, generating the final one
180
+
181
+ Args:
182
+ encoding (:class:`~tokenizers.Encoding`):
183
+ The encoding for the first sequence
184
+
185
+ pair (:class:`~tokenizers.Encoding`, `optional`):
186
+ The encoding for the pair sequence
187
+
188
+ add_special_tokens (:obj:`bool`):
189
+ Whether to add the special tokens
190
+
191
+ Return:
192
+ :class:`~tokenizers.Encoding`: The final encoding
193
+ """
194
+ pass
195
+
196
+ class Sequence(PostProcessor):
197
+ """
198
+ Sequence Processor
199
+
200
+ Args:
201
+ processors (:obj:`List[PostProcessor]`):
202
+ The processors that need to be chained
203
+ """
204
+
205
+ def __init__(self, processors):
206
+ pass
207
+ def num_special_tokens_to_add(self, is_pair):
208
+ """
209
+ Return the number of special tokens that would be added for single/pair sentences.
210
+
211
+ Args:
212
+ is_pair (:obj:`bool`):
213
+ Whether the input would be a pair of sequences
214
+
215
+ Returns:
216
+ :obj:`int`: The number of tokens to add
217
+ """
218
+ pass
219
+ def process(self, encoding, pair=None, add_special_tokens=True):
220
+ """
221
+ Post-process the given encodings, generating the final one
222
+
223
+ Args:
224
+ encoding (:class:`~tokenizers.Encoding`):
225
+ The encoding for the first sequence
226
+
227
+ pair (:class:`~tokenizers.Encoding`, `optional`):
228
+ The encoding for the pair sequence
229
+
230
+ add_special_tokens (:obj:`bool`):
231
+ Whether to add the special tokens
232
+
233
+ Return:
234
+ :class:`~tokenizers.Encoding`: The final encoding
235
+ """
236
+ pass
237
+
238
+ class TemplateProcessing(PostProcessor):
239
+ """
240
+ Provides a way to specify templates in order to add the special tokens to each
241
+ input sequence as relevant.
242
+
243
+ Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
244
+ delimit each sequence. :obj:`[CLS]` is always used at the beginning of the first
245
+ sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
246
+ sequences. The final result looks like this:
247
+
248
+ - Single sequence: :obj:`[CLS] Hello there [SEP]`
249
+ - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
250
+
251
+ With the type ids as following::
252
+
253
+ [CLS] ... [SEP] ... [SEP]
254
+ 0 0 0 1 1
255
+
256
+ You can achieve such behavior using a TemplateProcessing::
257
+
258
+ TemplateProcessing(
259
+ single="[CLS] $0 [SEP]",
260
+ pair="[CLS] $A [SEP] $B:1 [SEP]:1",
261
+ special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
262
+ )
263
+
264
+ In this example, each input sequence is identified using a ``$`` construct. This identifier
265
+ lets us specify each input sequence, and the type_id to use. When nothing is specified,
266
+ it uses the default values. Here are the different ways to specify it:
267
+
268
+ - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
269
+ - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
270
+ - Specifying both: ``$A:0``, ``$B:1``, ...
271
+
272
+ The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
273
+
274
+ **Warning**: You must ensure that you are giving the correct tokens/ids as these
275
+ will be added to the Encoding without any further check. If the given ids correspond
276
+ to something totally different in a `Tokenizer` using this `PostProcessor`, it
277
+ might lead to unexpected results.
278
+
279
+ Args:
280
+ single (:obj:`Template`):
281
+ The template used for single sequences
282
+
283
+ pair (:obj:`Template`):
284
+ The template used when both sequences are specified
285
+
286
+ special_tokens (:obj:`Tokens`):
287
+ The list of special tokens used in each sequence
288
+
289
+ Types:
290
+
291
+ Template (:obj:`str` or :obj:`List`):
292
+ - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
293
+ - If a :obj:`List[str]` is provided, a list of tokens
294
+
295
+ Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
296
+ - A :obj:`Tuple` with both a token and its associated ID, in any order
297
+ - A :obj:`dict` with the following keys:
298
+ - "id": :obj:`str` => The special token id, as specified in the Template
299
+ - "ids": :obj:`List[int]` => The associated IDs
300
+ - "tokens": :obj:`List[str]` => The associated tokens
301
+
302
+ The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
303
+ the same length.
304
+ """
305
+
306
+ def __init__(self, single, pair, special_tokens):
307
+ pass
308
+ def num_special_tokens_to_add(self, is_pair):
309
+ """
310
+ Return the number of special tokens that would be added for single/pair sentences.
311
+
312
+ Args:
313
+ is_pair (:obj:`bool`):
314
+ Whether the input would be a pair of sequences
315
+
316
+ Returns:
317
+ :obj:`int`: The number of tokens to add
318
+ """
319
+ pass
320
+ def process(self, encoding, pair=None, add_special_tokens=True):
321
+ """
322
+ Post-process the given encodings, generating the final one
323
+
324
+ Args:
325
+ encoding (:class:`~tokenizers.Encoding`):
326
+ The encoding for the first sequence
327
+
328
+ pair (:class:`~tokenizers.Encoding`, `optional`):
329
+ The encoding for the pair sequence
330
+
331
+ add_special_tokens (:obj:`bool`):
332
+ Whether to add the special tokens
333
+
334
+ Return:
335
+ :class:`~tokenizers.Encoding`: The final encoding
336
+ """
337
+ pass
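
The TemplateProcessing docstring above already gives the template syntax; as a non-authoritative illustration, the sketch below wires such a BERT-style template into a Tokenizer as its post-processor. The special-token ids 101 and 102 are placeholders for this example and would normally be looked up on a trained tokenizer.

    from tokenizers import Tokenizer
    from tokenizers.models import WordPiece
    from tokenizers.processors import TemplateProcessing

    tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
    # Placeholder ids; in practice use tokenizer.token_to_id("[CLS]") / tokenizer.token_to_id("[SEP]").
    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
    )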
env-llmeval/lib/python3.10/site-packages/tokenizers/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (360 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .visualizer import Annotation, EncodingVisualizer
env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (255 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/tools/__pycache__/visualizer.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tokenizers/tools/visualizer-styles.css ADDED
@@ -0,0 +1,170 @@
1
+ .tokenized-text {
2
+ width:100%;
3
+ padding:2rem;
4
+ max-height: 400px;
5
+ overflow-y: auto;
6
+ box-sizing:border-box;
7
+ line-height:4rem; /* Lots of space between lines */
8
+ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
9
+ box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
10
+ background-color: rgba(0,0,0,0.01);
11
+ letter-spacing:2px; /* Give some extra separation between chars */
12
+ }
13
+ .non-token{
14
+ /* White space and other things the tokenizer ignores*/
15
+ white-space: pre;
16
+ letter-spacing:4px;
17
+ border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious */
18
+ border-bottom:1px solid #A0A0A0;
19
+ line-height: 1rem;
20
+ height: calc(100% - 2px);
21
+ }
22
+
23
+ .token {
24
+ white-space: pre;
25
+ position:relative;
26
+ color:black;
27
+ letter-spacing:2px;
28
+ }
29
+
30
+ .annotation{
31
+ white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */
32
+ border-radius:4px;
33
+ position:relative;
34
+ width:fit-content;
35
+ }
36
+ .annotation:before {
37
+ /*The before holds the text and the after holds the background*/
38
+ z-index:1000; /* Make sure this is above the background */
39
+ content:attr(data-label); /* The annotations label is on a data attribute */
40
+ color:white;
41
+ position:absolute;
42
+ font-size:1rem;
43
+ text-align:center;
44
+ font-weight:bold;
45
+
46
+ top:1.75rem;
47
+ line-height:0;
48
+ left:0;
49
+ width:100%;
50
+ padding:0.5rem 0;
51
+ /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
52
+ overflow: hidden;
53
+ white-space: nowrap;
54
+ text-overflow:ellipsis;
55
+ }
56
+
57
+ .annotation:after {
58
+ content:attr(data-label); /* The content defines the width of the annotation*/
59
+ position:absolute;
60
+ font-size:0.75rem;
61
+ text-align:center;
62
+ font-weight:bold;
63
+ text-overflow:ellipsis;
64
+ top:1.75rem;
65
+ line-height:0;
66
+ overflow: hidden;
67
+ white-space: nowrap;
68
+
69
+ left:0;
70
+ width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
71
+
72
+ padding:0.5rem 0;
73
+ /* Nasty hack below:
74
+ We set the annotation's color in code because we don't know the colors at css time.
75
+ But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
76
+ So to get around that, annotations have the color set on them with a style attribute and then we
77
+ can get the color with currentColor.
78
+ Annotations wrap tokens and tokens set the color back to black
79
+ */
80
+ background-color: currentColor;
81
+ }
82
+ .annotation:hover::after, .annotation:hover::before{
83
+ /* When the user hovers over an annotation expand the label to display in full
84
+ */
85
+ min-width: fit-content;
86
+ }
87
+
88
+ .annotation:hover{
89
+ /* Emphasize the annotation start end with a border on hover*/
90
+ border-color: currentColor;
91
+ border: 2px solid;
92
+ }
93
+ .special-token:not(:empty){
94
+ /*
95
+ A non-empty special token is one like UNK (as opposed to CLS, which has no representation in the text)
96
+ */
97
+ position:relative;
98
+ }
99
+ .special-token:empty::before{
100
+ /* Special tokens that don't have text are displayed as pseudo elements so we don't select them with the mouse */
101
+ content:attr(data-stok);
102
+ background:#202020;
103
+ font-size:0.75rem;
104
+ color:white;
105
+ margin: 0 0.25rem;
106
+ padding: 0.25rem;
107
+ border-radius:4px
108
+ }
109
+
110
+ .special-token:not(:empty):before {
111
+ /* Special tokens that have text (UNK) are displayed above the actual text*/
112
+ content:attr(data-stok);
113
+ position:absolute;
114
+ bottom:1.75rem;
115
+ min-width:100%;
116
+ width:100%;
117
+ height:1rem;
118
+ line-height:1rem;
119
+ font-size:1rem;
120
+ text-align:center;
121
+ color:white;
122
+ font-weight:bold;
123
+ background:#202020;
124
+ border-radius:10%;
125
+ }
126
+ /*
127
+ We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
128
+ instead we apply even and odd class at generation time and color them that way
129
+ */
130
+ .even-token{
131
+ background:#DCDCDC ;
132
+ border: 1px solid #DCDCDC;
133
+ }
134
+ .odd-token{
135
+ background:#A0A0A0;
136
+ border: 1px solid #A0A0A0;
137
+ }
138
+ .even-token.multi-token,.odd-token.multi-token{
139
+ background: repeating-linear-gradient(
140
+ 45deg,
141
+ transparent,
142
+ transparent 1px,
143
+ #ccc 1px,
144
+ #ccc 1px
145
+ ),
146
+ /* on "bottom" */
147
+ linear-gradient(
148
+ to bottom,
149
+ #FFB6C1,
150
+ #999
151
+ );
152
+ }
153
+
154
+ .multi-token:hover::after {
155
+ content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
156
+ color:white;
157
+ background-color: black;
158
+ position:absolute;
159
+ font-size:0.75rem;
160
+ text-align:center;
161
+ font-weight:bold;
162
+ text-overflow:ellipsis;
163
+ top:1.75rem;
164
+ line-height:0;
165
+ overflow: hidden;
166
+ white-space: nowrap;
167
+ left:0;
168
+ width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
169
+ padding:0.5rem 0;
170
+ }
env-llmeval/lib/python3.10/site-packages/tokenizers/tools/visualizer.py ADDED
@@ -0,0 +1,403 @@
1
+ import itertools
2
+ import os
3
+ import re
4
+ from string import Template
5
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
6
+
7
+ from tokenizers import Encoding, Tokenizer
8
+
9
+
10
+ dirname = os.path.dirname(__file__)
11
+ css_filename = os.path.join(dirname, "visualizer-styles.css")
12
+ with open(css_filename) as f:
13
+ css = f.read()
14
+
15
+
16
+ class Annotation:
17
+ start: int
18
+ end: int
19
+ label: str
20
+
21
+ def __init__(self, start: int, end: int, label: str):
22
+ self.start = start
23
+ self.end = end
24
+ self.label = label
25
+
26
+
27
+ AnnotationList = List[Annotation]
28
+ PartialIntList = List[Optional[int]]
29
+
30
+
31
+ class CharStateKey(NamedTuple):
32
+ token_ix: Optional[int]
33
+ anno_ix: Optional[int]
34
+
35
+
36
+ class CharState:
37
+ char_ix: Optional[int]
38
+
39
+ def __init__(self, char_ix):
40
+ self.char_ix = char_ix
41
+
42
+ self.anno_ix: Optional[int] = None
43
+ self.tokens: List[int] = []
44
+
45
+ @property
46
+ def token_ix(self):
47
+ return self.tokens[0] if len(self.tokens) > 0 else None
48
+
49
+ @property
50
+ def is_multitoken(self):
51
+ """
52
+ BPE tokenizers can output more than one token for a char
53
+ """
54
+ return len(self.tokens) > 1
55
+
56
+ def partition_key(self) -> CharStateKey:
57
+ return CharStateKey(
58
+ token_ix=self.token_ix,
59
+ anno_ix=self.anno_ix,
60
+ )
61
+
62
+
63
+ class Aligned:
64
+ pass
65
+
66
+
67
+ class EncodingVisualizer:
68
+ """
69
+ Build an EncodingVisualizer
70
+
71
+ Args:
72
+
73
+ tokenizer (:class:`~tokenizers.Tokenizer`):
74
+ A tokenizer instance
75
+
76
+ default_to_notebook (:obj:`bool`):
77
+ Whether to render html output in a notebook by default
78
+
79
+ annotation_converter (:obj:`Callable`, `optional`):
80
+ An optional (lambda) function that takes an annotation in any format and returns
81
+ an Annotation object
82
+ """
83
+
84
+ unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)  # raw string so \b is a word boundary
85
+
86
+ def __init__(
87
+ self,
88
+ tokenizer: Tokenizer,
89
+ default_to_notebook: bool = True,
90
+ annotation_converter: Optional[Callable[[Any], Annotation]] = None,
91
+ ):
92
+ if default_to_notebook:
93
+ try:
94
+ from IPython.core.display import HTML, display
95
+ except ImportError as e:
96
+ raise Exception(
97
+ """We couldn't import IPython utils for html display.
98
+ Are you running in a notebook?
99
+ You can also pass `default_to_notebook=False` to get back raw HTML
100
+ """
101
+ )
102
+
103
+ self.tokenizer = tokenizer
104
+ self.default_to_notebook = default_to_notebook
105
+ self.annotation_coverter = annotation_converter
106
+ pass
107
+
108
+ def __call__(
109
+ self,
110
+ text: str,
111
+ annotations: AnnotationList = [],
112
+ default_to_notebook: Optional[bool] = None,
113
+ ) -> Optional[str]:
114
+ """
115
+ Build a visualization of the given text
116
+
117
+ Args:
118
+ text (:obj:`str`):
119
+ The text to tokenize
120
+
121
+ annotations (:obj:`List[Annotation]`, `optional`):
122
+ An optional list of annotations of the text. They can either be an Annotation instance
123
+ or anything else if you instantiated the visualizer with a converter function
124
+
125
+ default_to_notebook (:obj:`bool`, `optional`, defaults to the value set in the constructor):
126
+ If True, will render the html in a notebook. Otherwise returns an html string.
127
+
128
+ Returns:
129
+ The HTML string if default_to_notebook is False, otherwise (default) returns None and
130
+ renders the HTML in the notebook
131
+
132
+ """
133
+ final_default_to_notebook = self.default_to_notebook
134
+ if default_to_notebook is not None:
135
+ final_default_to_notebook = default_to_notebook
136
+ if final_default_to_notebook:
137
+ try:
138
+ from IPython.core.display import HTML, display
139
+ except ImportError as e:
140
+ raise Exception(
141
+ """We couldn't import IPython utils for html display.
142
+ Are you running in a notebook?"""
143
+ )
144
+ if self.annotation_coverter is not None:
145
+ annotations = list(map(self.annotation_coverter, annotations))
146
+ encoding = self.tokenizer.encode(text)
147
+ html = EncodingVisualizer.__make_html(text, encoding, annotations)
148
+ if final_default_to_notebook:
149
+ display(HTML(html))
150
+ else:
151
+ return html
152
+
153
+ @staticmethod
154
+ def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
155
+ """
156
+ Generates a color palette for all the labels in a given set of annotations
157
+
158
+ Args:
159
+ annotations (:obj:`Annotation`):
160
+ A list of annotations
161
+
162
+ Returns:
163
+ :obj:`dict`: A dictionary mapping labels to colors in HSL format
164
+ """
165
+ if len(annotations) == 0:
166
+ return {}
167
+ labels = set(map(lambda x: x.label, annotations))
168
+ num_labels = len(labels)
169
+ h_step = int(255 / num_labels)
170
+ if h_step < 20:
171
+ h_step = 20
172
+ s = 32
173
+ l = 64
174
+ h = 10
175
+ colors = {}
176
+
177
+ for label in sorted(labels): # sort so we always get the same colors for a given set of labels
178
+ colors[label] = f"hsl({h},{s}%,{l}%)"  # closing parenthesis for a valid CSS color
179
+ h += h_step
180
+ return colors
181
+
182
+ @staticmethod
183
+ def consecutive_chars_to_html(
184
+ consecutive_chars_list: List[CharState],
185
+ text: str,
186
+ encoding: Encoding,
187
+ ):
188
+ """
189
+ Converts a list of "consecutive chars" into a single HTML element.
190
+ Chars are consecutive if they fall under the same word, token and annotation.
191
+ The CharState class provides a "partition_key" method that makes it easy to
192
+ compare if two chars are consecutive.
193
+
194
+ Args:
195
+ consecutive_chars_list (:obj:`List[CharState]`):
196
+ A list of CharStates that have been grouped together
197
+
198
+ text (:obj:`str`):
199
+ The original text being processed
200
+
201
+ encoding (:class:`~tokenizers.Encoding`):
202
+ The encoding returned from the tokenizer
203
+
204
+ Returns:
205
+ :obj:`str`: The HTML span for a set of consecutive chars
206
+ """
207
+ first = consecutive_chars_list[0]
208
+ if first.char_ix is None:
209
+ # its a special token
210
+ stoken = encoding.tokens[first.token_ix]
211
+ # special tokens are represented as empty spans. We use the data attribute and css
212
+ # magic to display it
213
+ return f'<span class="special-token" data-stok="{stoken}"></span>'
214
+ # We're not in a special token so this group has a start and end.
215
+ last = consecutive_chars_list[-1]
216
+ start = first.char_ix
217
+ end = last.char_ix + 1
218
+ span_text = text[start:end]
219
+ css_classes = [] # What css classes will we apply on the resulting span
220
+ data_items = {} # What data attributes will we apply on the result span
221
+ if first.token_ix is not None:
222
+ # We can either be in a token or not (e.g. in white space)
223
+ css_classes.append("token")
224
+ if first.is_multitoken:
225
+ css_classes.append("multi-token")
226
+ if first.token_ix % 2:
227
+ # We use this to color alternating tokens.
228
+ # A token might be split by an annotation that ends in the middle of it, so this
229
+ # lets us visually indicate a consecutive token despite its possible splitting in
230
+ # the html markup
231
+ css_classes.append("odd-token")
232
+ else:
233
+ # Like above, but a different color so we can see the tokens alternate
234
+ css_classes.append("even-token")
235
+ if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
236
+ # This is a special token that is in the text, probably UNK
237
+ css_classes.append("special-token")
238
+ # TODO is this the right name for the data attribute ?
239
+ data_items["stok"] = encoding.tokens[first.token_ix]
240
+ else:
241
+ # In this case we are looking at a group/single char that is not tokenized.
242
+ # e.g. white space
243
+ css_classes.append("non-token")
244
+ css = f'''class="{' '.join(css_classes)}"'''
245
+ data = ""
246
+ for key, val in data_items.items():
247
+ data += f' data-{key}="{val}"'
248
+ return f"<span {css} {data} >{span_text}</span>"
249
+
250
+ @staticmethod
251
+ def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
252
+ char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
253
+ current_consecutive_chars = [char_states[0]]
254
+ prev_anno_ix = char_states[0].anno_ix
255
+ spans = []
256
+ label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
257
+ cur_anno_ix = char_states[0].anno_ix
258
+ if cur_anno_ix is not None:
259
+ # If we started in an annotation make a span for it
260
+ anno = annotations[cur_anno_ix]
261
+ label = anno.label
262
+ color = label_colors_dict[label]
263
+ spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
264
+
265
+ for cs in char_states[1:]:
266
+ cur_anno_ix = cs.anno_ix
267
+ if cur_anno_ix != prev_anno_ix:
268
+ # If we've transitioned in or out of an annotation
269
+ spans.append(
270
+ # Create a span from the current consecutive characters
271
+ EncodingVisualizer.consecutive_chars_to_html(
272
+ current_consecutive_chars,
273
+ text=text,
274
+ encoding=encoding,
275
+ )
276
+ )
277
+ current_consecutive_chars = [cs]
278
+
279
+ if prev_anno_ix is not None:
280
+ # if we transitioned out of an annotation close it's span
281
+ spans.append("</span>")
282
+ if cur_anno_ix is not None:
283
+ # If we entered a new annotation make a span for it
284
+ anno = annotations[cur_anno_ix]
285
+ label = anno.label
286
+ color = label_colors_dict[label]
287
+ spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
288
+ prev_anno_ix = cur_anno_ix
289
+
290
+ if cs.partition_key() == current_consecutive_chars[0].partition_key():
291
+ # If the current character is in the same "group" as the previous one
292
+ current_consecutive_chars.append(cs)
293
+ else:
294
+ # Otherwise we make a span for the previous group
295
+ spans.append(
296
+ EncodingVisualizer.consecutive_chars_to_html(
297
+ current_consecutive_chars,
298
+ text=text,
299
+ encoding=encoding,
300
+ )
301
+ )
302
+ # And reset the consecutive_char_list to form a new group
303
+ current_consecutive_chars = [cs]
304
+ # All that's left is to fill out the final span
305
+ # TODO I think there is an edge case here where an annotation's span might not close
306
+ spans.append(
307
+ EncodingVisualizer.consecutive_chars_to_html(
308
+ current_consecutive_chars,
309
+ text=text,
310
+ encoding=encoding,
311
+ )
312
+ )
313
+ res = HTMLBody(spans) # Send the list of spans to the body of our html
314
+ return res
315
+
316
+ @staticmethod
317
+ def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
318
+ """
319
+ Args:
320
+ text (:obj:`str`):
321
+ The raw text we want to align to
322
+
323
+ annotations (:obj:`AnnotationList`):
324
+ A (possibly empty) list of annotations
325
+
326
+ Returns:
327
+ A list of length len(text) whose entry at index i is None if there is no annotation on
328
+ character i, or k, the index of the annotation that covers index i, where k is with
329
+ respect to the list of annotations
330
+ """
331
+ annotation_map = [None] * len(text)
332
+ for anno_ix, a in enumerate(annotations):
333
+ for i in range(a.start, a.end):
334
+ annotation_map[i] = anno_ix
335
+ return annotation_map
336
+
337
+ @staticmethod
338
+ def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
339
+ """
340
+ For each character in the original text, we emit a tuple representing its "state":
341
+
342
+ * which token_ix it corresponds to
343
+ * which word_ix it corresponds to
344
+ * which annotation_ix it corresponds to
345
+
346
+ Args:
347
+ text (:obj:`str`):
348
+ The raw text we want to align to
349
+
350
+ annotations (:obj:`List[Annotation]`):
351
+ A (possibly empty) list of annotations
352
+
353
+ encoding: (:class:`~tokenizers.Encoding`):
354
+ The encoding returned from the tokenizer
355
+
356
+ Returns:
357
+ :obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
358
+ its state is
359
+ """
360
+ annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
361
+ # Todo make this a dataclass or named tuple
362
+ char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
363
+ for token_ix, token in enumerate(encoding.tokens):
364
+ offsets = encoding.token_to_chars(token_ix)
365
+ if offsets is not None:
366
+ start, end = offsets
367
+ for i in range(start, end):
368
+ char_states[i].tokens.append(token_ix)
369
+ for char_ix, anno_ix in enumerate(annotation_map):
370
+ char_states[char_ix].anno_ix = anno_ix
371
+
372
+ return char_states
373
+
374
+
375
+ def HTMLBody(children: List[str], css_styles=css) -> str:
376
+ """
377
+ Generates the full html with css from a list of html spans
378
+
379
+ Args:
380
+ children (:obj:`List[str]`):
381
+ A list of strings, assumed to be html elements
382
+
383
+ css_styles (:obj:`str`, `optional`):
384
+ Optional alternative implementation of the css
385
+
386
+ Returns:
387
+ :obj:`str`: An HTML string with style markup
388
+ """
389
+ children_text = "".join(children)
390
+ return f"""
391
+ <html>
392
+ <head>
393
+ <style>
394
+ {css_styles}
395
+ </style>
396
+ </head>
397
+ <body>
398
+ <div class="tokenized-text" dir=auto>
399
+ {children_text}
400
+ </div>
401
+ </body>
402
+ </html>
403
+ """
env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Generated content DO NOT EDIT
2
+ from .. import trainers
3
+
4
+ Trainer = trainers.Trainer
5
+ BpeTrainer = trainers.BpeTrainer
6
+ UnigramTrainer = trainers.UnigramTrainer
7
+ WordLevelTrainer = trainers.WordLevelTrainer
8
+ WordPieceTrainer = trainers.WordPieceTrainer
env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__init__.pyi ADDED
@@ -0,0 +1,158 @@
1
+ # Generated content DO NOT EDIT
2
+ class Trainer:
3
+ """
4
+ Base class for all trainers
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of a
7
+ Trainer will return an instance of this class when instantiated.
8
+ """
9
+
10
+ class BpeTrainer(Trainer):
11
+ """
12
+ Trainer capable of training a BPE model
13
+
14
+ Args:
15
+ vocab_size (:obj:`int`, `optional`):
16
+ The size of the final vocabulary, including all tokens and alphabet.
17
+
18
+ min_frequency (:obj:`int`, `optional`):
19
+ The minimum frequency a pair should have in order to be merged.
20
+
21
+ show_progress (:obj:`bool`, `optional`):
22
+ Whether to show progress bars while training.
23
+
24
+ special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
25
+ A list of special tokens the model should know of.
26
+
27
+ limit_alphabet (:obj:`int`, `optional`):
28
+ The maximum different characters to keep in the alphabet.
29
+
30
+ initial_alphabet (:obj:`List[str]`, `optional`):
31
+ A list of characters to include in the initial alphabet, even
32
+ if not seen in the training dataset.
33
+ If the strings contain more than one character, only the first one
34
+ is kept.
35
+
36
+ continuing_subword_prefix (:obj:`str`, `optional`):
37
+ A prefix to be used for every subword that is not a beginning-of-word.
38
+
39
+ end_of_word_suffix (:obj:`str`, `optional`):
40
+ A suffix to be used for every subword that is an end-of-word.
41
+
42
+ max_token_length (:obj:`int`, `optional`):
43
+ Prevents creating tokens longer than the specified size.
44
+ This can help avoid polluting your vocabulary with
45
+ highly repetitive tokens like `======` for Wikipedia.
46
+
47
+ """
48
+
49
+ class UnigramTrainer(Trainer):
50
+ """
51
+ Trainer capable of training a Unigram model
52
+
53
+ Args:
54
+ vocab_size (:obj:`int`):
55
+ The size of the final vocabulary, including all tokens and alphabet.
56
+
57
+ show_progress (:obj:`bool`):
58
+ Whether to show progress bars while training.
59
+
60
+ special_tokens (:obj:`List[Union[str, AddedToken]]`):
61
+ A list of special tokens the model should know of.
62
+
63
+ initial_alphabet (:obj:`List[str]`):
64
+ A list of characters to include in the initial alphabet, even
65
+ if not seen in the training dataset.
66
+ If the strings contain more than one character, only the first one
67
+ is kept.
68
+
69
+ shrinking_factor (:obj:`float`):
70
+ The shrinking factor used at each step of the training to prune the
71
+ vocabulary.
72
+
73
+ unk_token (:obj:`str`):
74
+ The token used for out-of-vocabulary tokens.
75
+
76
+ max_piece_length (:obj:`int`):
77
+ The maximum length of a given token.
78
+
79
+ n_sub_iterations (:obj:`int`):
80
+ The number of iterations of the EM algorithm to perform before
81
+ pruning the vocabulary.
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ vocab_size=8000,
87
+ show_progress=True,
88
+ special_tokens=[],
89
+ shrinking_factor=0.75,
90
+ unk_token=None,
91
+ max_piece_length=16,
92
+ n_sub_iterations=2,
93
+ ):
94
+ pass
95
+
96
+ class WordLevelTrainer(Trainer):
97
+ """
98
+ Trainer capable of training a WordLevel model
99
+
100
+ Args:
101
+ vocab_size (:obj:`int`, `optional`):
102
+ The size of the final vocabulary, including all tokens and alphabet.
103
+
104
+ min_frequency (:obj:`int`, `optional`):
105
+ The minimum frequency a pair should have in order to be merged.
106
+
107
+ show_progress (:obj:`bool`, `optional`):
108
+ Whether to show progress bars while training.
109
+
110
+ special_tokens (:obj:`List[Union[str, AddedToken]]`):
111
+ A list of special tokens the model should know of.
112
+ """
113
+
114
+ class WordPieceTrainer(Trainer):
115
+ """
116
+ Trainer capable of training a WordPiece model
117
+
118
+ Args:
119
+ vocab_size (:obj:`int`, `optional`):
120
+ The size of the final vocabulary, including all tokens and alphabet.
121
+
122
+ min_frequency (:obj:`int`, `optional`):
123
+ The minimum frequency a pair should have in order to be merged.
124
+
125
+ show_progress (:obj:`bool`, `optional`):
126
+ Whether to show progress bars while training.
127
+
128
+ special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
129
+ A list of special tokens the model should know of.
130
+
131
+ limit_alphabet (:obj:`int`, `optional`):
132
+ The maximum different characters to keep in the alphabet.
133
+
134
+ initial_alphabet (:obj:`List[str]`, `optional`):
135
+ A list of characters to include in the initial alphabet, even
136
+ if not seen in the training dataset.
137
+ If the strings contain more than one character, only the first one
138
+ is kept.
139
+
140
+ continuing_subword_prefix (:obj:`str`, `optional`):
141
+ A prefix to be used for every subword that is not a beginning-of-word.
142
+
143
+ end_of_word_suffix (:obj:`str`, `optional`):
144
+ A suffix to be used for every subword that is an end-of-word.
145
+ """
146
+
147
+ def __init__(
148
+ self,
149
+ vocab_size=30000,
150
+ min_frequency=0,
151
+ show_progress=True,
152
+ special_tokens=[],
153
+ limit_alphabet=None,
154
+ initial_alphabet=[],
155
+ continuing_subword_prefix="##",
156
+ end_of_word_suffix=None,
157
+ ):
158
+ pass
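
To show how the trainers declared above are typically combined with a model, here is a minimal, non-authoritative sketch of BPE training; the corpus path "data.txt" is a placeholder.

    from tokenizers import Tokenizer
    from tokenizers.models import BPE
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.trainers import BpeTrainer

    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    tokenizer.pre_tokenizer = Whitespace()

    trainer = BpeTrainer(
        vocab_size=30000,
        min_frequency=2,
        special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
    )
    tokenizer.train(["data.txt"], trainer)  # "data.txt" is a placeholder plain-text corpus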
env-llmeval/lib/python3.10/site-packages/tokenizers/trainers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (330 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ """torchgen
2
+
3
+ This module contains codegeneration utilities for PyTorch. It is used to
4
+ build PyTorch from source, but may also be used for out-of-tree projects
5
+ that extend PyTorch.
6
+
7
+ Note well that we provide no BC guarantees for torchgen. If you're interested
8
+ in using torchgen and want the PyTorch team to be aware, please reach out
9
+ on GitHub.
10
+ """
env-llmeval/lib/python3.10/site-packages/torchgen/api/lazy.py ADDED
@@ -0,0 +1,464 @@
1
+ from typing import Any, Dict, List, Optional, Tuple, Union
2
+
3
+ from torchgen.api.types import (
4
+ BaseCppType,
5
+ BaseCType,
6
+ boolT,
7
+ CType,
8
+ deviceT,
9
+ doubleT,
10
+ generatorT,
11
+ layoutT,
12
+ ListCType,
13
+ longT,
14
+ memoryFormatT,
15
+ NamedCType,
16
+ OptionalCType,
17
+ scalarT,
18
+ scalarTypeT,
19
+ stringT,
20
+ SymIntT,
21
+ VectorCType,
22
+ )
23
+
24
+ from torchgen.model import (
25
+ Argument,
26
+ BaseTy,
27
+ BaseType,
28
+ FunctionSchema,
29
+ ListType,
30
+ OperatorName,
31
+ OptionalType,
32
+ Return,
33
+ TensorOptionsArguments,
34
+ Type,
35
+ )
36
+
37
+
38
+ _valueT = None
39
+
40
+
41
+ # A ValueT is an IR type which represents the computation of a Tensor. In other
42
+ # words, a PyTorch user will do operations on lazy tensors, and each output lazy
43
+ # tensor internally tracks a ValueT representing the IR node that would have
44
+ # actually produced the value of this tensor for real.
45
+ #
46
+ # This is configurable because different lazy tensor backends (LTC vs XLA) will
47
+ # have different IR representations. (Though, arguably, after unification they
48
+ # shouldn't!)
49
+ def getValueT() -> BaseCppType:
50
+ global _valueT
51
+ if not _valueT:
52
+ raise NotImplementedError(
53
+ "The value type needs to be set with setValueT() in run_gen_lazy_tensor()"
54
+ )
55
+
56
+ return _valueT
57
+
58
+
59
+ def setValueT(val: BaseCppType) -> None:
60
+ global _valueT
61
+ _valueT = val
62
+
63
+
64
+ # this is a bad hack. I need to refactor the data model to represent each arg in the schema as an object,
65
+ # making it easier to represent special properties of an arg.
66
+ tensorListValueT = BaseCppType("torch::lazy", "Value")
67
+
68
+
69
+ def process_ir_type(
70
+ typ: Type, properties: "LazyIrProperties", *, symint: bool
71
+ ) -> Union[BaseCType, VectorCType, OptionalCType, ListCType]:
72
+ """
73
+ This function takes a type from NativeFunctions and converts it for use with
74
+ lazy tensor codegen.
75
+
76
+ Type conversion for lazy currently consists of
77
+ (1) changing at::Tensors into lazy::Values
78
+ (2) wrapping everything in a BaseCType
79
+ (3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)
80
+
81
+ (1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
82
+ There is special handling for Optional[Tensor] or List[Tensor], etc- hence 'tensor-like'
83
+
84
+ This is incomplete- there are assertions in places that it's expected to need to add
85
+ more types as the codegen is used with more operators.
86
+ """
87
+ if isinstance(typ, BaseType):
88
+ if typ.name == BaseTy.Tensor:
89
+ return BaseCType(getValueT())
90
+ elif typ.name == BaseTy.Scalar:
91
+ if properties.TreatScalarsAsConstants:
92
+ return BaseCType(scalarT)
93
+ # at::scalar has special handling,
94
+ # and is wrapped in an lazy::Value just like at::tensor
95
+ return BaseCType(getValueT())
96
+ elif typ.name == BaseTy.ScalarType:
97
+ return BaseCType(scalarTypeT)
98
+ elif typ.name == BaseTy.int:
99
+ return BaseCType(longT)
100
+ elif typ.name == BaseTy.SymInt:
101
+ if symint:
102
+ return BaseCType(getValueT())
103
+ else:
104
+ return BaseCType(longT)
105
+ elif typ.name == BaseTy.bool:
106
+ return BaseCType(boolT)
107
+ elif typ.name == BaseTy.float:
108
+ return BaseCType(doubleT)
109
+ elif typ.name == BaseTy.str:
110
+ return BaseCType(stringT)
111
+ elif typ.name == BaseTy.Device:
112
+ return BaseCType(deviceT)
113
+ elif typ.name == BaseTy.Generator:
114
+ return BaseCType(generatorT)
115
+ elif typ.name == BaseTy.Layout:
116
+ return BaseCType(layoutT)
117
+ elif typ.name == BaseTy.MemoryFormat:
118
+ return BaseCType(memoryFormatT)
119
+ else:
120
+ raise AssertionError(f"TODO add support for type {repr(typ)}")
121
+ elif isinstance(typ, OptionalType):
122
+ return OptionalCType(process_ir_type(typ.elem, properties, symint=symint))
123
+ elif isinstance(typ, ListType):
124
+ if str(typ.elem) == "Tensor?":
125
+ # TODO(whc) is this actually correct? or should it use a Vector like above
126
+ return ListCType(OptionalCType(BaseCType(getValueT())))
127
+ elif str(typ.elem) == "Tensor":
128
+ # this is a TensorList which comes in from GetTensorList as a Value
129
+ return BaseCType(tensorListValueT)
130
+ elif typ.elem == BaseType(BaseTy.SymInt):
131
+ # TODO: return a value type. The problem here is analogous to
132
+ # the problem with tensorListValueT: if you have SymInt[] you
133
+ # cannot conveniently save the list of Value directly, as nodes
134
+ # expect to save values as a vector for ALL arguments. So you
135
+ # need a separate IR node that represents all of the size nodes
136
+ # assembled into a list. I'm not an LTC dev so I don't want to
137
+ # figure it out right now. Y'all figure it out...
138
+ return VectorCType(BaseCType(longT))
139
+
140
+ else:
141
+ return VectorCType(process_ir_type(typ.elem, properties, symint=symint))
142
+ else:
143
+ raise AssertionError(f"unrecognized type {repr(typ)}")
144
+
145
+
146
+ # TODO: Determining this based off of CType is bad; this should be computed
147
+ # from Type directly; then the same logic as process_ir_type can be used
148
+ #
149
+ # Invariant: passed typ should be an *owning* CType (e.g., we will report
150
+ # that ArrayRef<Value> is NOT a value type)
151
+ def isValueType(typ: CType, properties: "Optional[LazyIrProperties]" = None) -> bool:
152
+ """
153
+ Given a type, determine if it is a Value-like type. This is equivalent to
154
+ being Tensor-like, but assumes the type has already been transformed.
155
+ """
156
+ if isinstance(typ, BaseCType):
157
+ # I am regretting my naming conventions, but now we are wrapping at::scalar in
158
+ # lazy value, while preserving other 'scalar' types as scalars in the IR
159
+ treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants
160
+ return (
161
+ typ.type == getValueT()
162
+ or (typ.type == scalarT and not treat_scalars_as_constants)
163
+ or typ.type == SymIntT
164
+ )
165
+ elif typ == VectorCType(BaseCType(SymIntT)):
166
+ # TODO: report True for this
167
+ return False
168
+ elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):
169
+ return isValueType(typ.elem, properties)
170
+ return False
171
+
172
+
173
+ def isSymIntType(typ: Type) -> bool:
174
+ return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt
175
+
176
+
177
+ def isWrappedScalarType(typ: Type) -> bool:
178
+ """
179
+ Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
180
+ Since we literally change the type from scalarT to valueT, information is lost.
181
+ This function helps build a list of wrapped scalars to save that information
182
+ """
183
+ if isinstance(typ, BaseType):
184
+ # I am regretting my naming conventions, but now we are wrapping at::scalar in
185
+ # lazy value, while preserving other 'scalar' types as scalars in the IR
186
+ return typ.name == BaseTy.Scalar
187
+ elif isinstance(typ, (OptionalType, ListType)):
188
+ return isWrappedScalarType(typ.elem)
189
+ return False
190
+
191
+
192
+ # TODO: dedupe with Type.is_generator_like
193
+ def isGeneratorType(typ: Type) -> bool:
194
+ if isinstance(typ, BaseType):
195
+ return typ.name == BaseTy.Generator
196
+ elif isinstance(typ, (OptionalType)):
197
+ return isGeneratorType(typ.elem)
198
+ return False
199
+
200
+
201
+ # This class caches a few derived properties computed from an Argument
202
+ # and LazyIrProperties
203
+ class LazyArgument:
204
+ name: str
205
+ orig_type: Type
206
+ lazy_type_: Optional[CType]
207
+ is_wrapped_scalar: bool
208
+ is_generator: bool
209
+ # TODO: this is lies, it is false for symint list
210
+ is_symint_or_list: bool
211
+
212
+ # Whether or not we are treating this as symint or not
213
+ symint: bool
214
+
215
+ # true if this argument is or contains a lazy IR value
216
+ is_lazy_value: bool
217
+
218
+ def __init__(self, arg: Argument, properties: "LazyIrProperties", *, symint: bool):
219
+ self.name = arg.name
220
+ self.orig_type = arg.type
221
+ self.symint = symint
222
+ self.is_optional = isinstance(arg.type, OptionalType)
223
+ self.is_generator = isGeneratorType(arg.type)
224
+ self.lazy_type_ = process_ir_type(arg.type, properties, symint=symint)
225
+ self.is_wrapped_scalar = isWrappedScalarType(arg.type)
226
+ self.is_symint_or_list = symint and (
227
+ isSymIntType(arg.type)
228
+ or (isinstance(arg.type, OptionalType) and isSymIntType(arg.type.elem))
229
+ # TODO: lists of symints are not currently treated as value types
230
+ # or (isinstance(arg.type, ListType) and isSymIntType(arg.type.elem))
231
+ )
232
+
233
+ self.is_lazy_value = isValueType(self.lazy_type, properties)
234
+
235
+ @property
236
+ def lazy_type(self) -> CType:
237
+ assert (
238
+ self.lazy_type_ is not None
239
+ ), f"Attempted to access lazy_type for invalid argument {self.name}"
240
+ return self.lazy_type_
241
+
242
+
243
+ class LazyIrProperties:
244
+ """Collection of properties for an IR node
245
+
246
+ The property groups are listed below. Each group is mutually
247
+ exclusive, meaning that only one property from each group can be True
248
+ at any one time. The properties can be accessed as if they were normal
249
+ attributes. The mutual exclusivity is automatically handled.
250
+ """
251
+
252
+ Properties: Tuple[Tuple[str, ...], ...] = (
253
+ (
254
+ "ShapePrecompute", # Assume shape has been precomputed
255
+ "ShapeCompute", # Need to compute the shape on construction
256
+ "ShapeCache", # Utilize the shape cache to defer computation
257
+ ),
258
+ (
259
+ "Lower", # Codegen full lower function
260
+ "LowerDeclOnly", # Codegen only lower function declaration
261
+ ),
262
+ (
263
+ "CanBeReused", # Codegen full reuse function
264
+ "CanBeReusedDeclOnly", # Codegen only reuse function declaration
265
+ ),
266
+ (
267
+ "CreateFn", # Codegen full create function
268
+ "CreateFnDeclOnly", # Codegen only create function declaration
269
+ ),
270
+ (
271
+ "TreatScalarsAsConstants", # Treat Scalars as constants instead of handling like values
272
+ ),
273
+ )
274
+
275
+ def __init__(self, *default_properties: str):
276
+ properties: Dict[Tuple[str, ...], Optional[str]] = {
277
+ p: None for p in LazyIrProperties.Properties
278
+ }
279
+ self.__dict__["properties"] = properties
280
+ for p in default_properties:
281
+ setattr(self, p, True)
282
+
283
+ def __getattr__(self, key: str) -> Any:
284
+ properties = self.__dict__["properties"]
285
+ for values in LazyIrProperties.Properties:
286
+ if key in values:
287
+ return properties[values] == key
288
+
289
+ return self.__getattribute__(key)
290
+
291
+ def __setattr__(self, key: str, value: Any) -> Any:
292
+ properties = self.__dict__["properties"]
293
+ for values in LazyIrProperties.Properties:
294
+ if key in values:
295
+ properties[values] = key if value else None
296
+ return value
297
+
298
+ raise KeyError(f"Invalid property: {key}")
299
+
300
+
301
+ # Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
302
+ # Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
303
+ # but carries type information from a native FunctionSchema modified for use with IR nodes,
304
+ # and preserving original argument names.
305
+ #
306
+ # TODO: This is not idiomatic with how other torchgen APIs transform on schema.
307
+ class LazyIrSchema:
308
+ # The name of the operator this function schema describes.
309
+ name: "OperatorName"
310
+
311
+ positional_args: Tuple[LazyArgument, ...]
312
+ keyword_args: Tuple[LazyArgument, ...]
313
+
314
+ # TODO: Need to handle collisions with argument names at some point
315
+ returns: Tuple["Return", ...]
316
+
317
+ # if this schema has a Generator arg, list its orig ctype/name but don't
318
+ # build a LazyArgument since lazy IR doesn't support it
319
+ generator_arg: Optional[NamedCType] = None
320
+
321
+ # original function schema
322
+ func: FunctionSchema
323
+
324
+ # Whether or not we are code-genning for SymInt or not
325
+ symint: bool
326
+
327
+ properties: LazyIrProperties = LazyIrProperties(
328
+ # default properties
329
+ "ShapePrecompute",
330
+ "Lower",
331
+ "CanBeReused",
332
+ )
333
+ opkind: Optional[str] = None
334
+
335
+ def __init__(
336
+ self,
337
+ func: FunctionSchema,
338
+ properties: Optional[LazyIrProperties] = None,
339
+ *,
340
+ symint: bool,
341
+ ):
342
+ if properties:
343
+ self.properties = properties
344
+
345
+ self.func = func
346
+ self.symint = symint
347
+ positional_args: List[LazyArgument] = []
348
+ for arg_field in ["pre_self_positional", "self_arg", "post_self_positional"]:
349
+ if arg_field == "self_arg" and func.arguments.self_arg is not None:
350
+ arg = func.arguments.self_arg.argument
351
+ positional_args.append(
352
+ LazyArgument(arg, self.properties, symint=symint)
353
+ )
354
+ elif getattr(func.arguments, arg_field) is not None:
355
+ positional_args.extend(
356
+ LazyArgument(arg, self.properties, symint=symint)
357
+ for arg in getattr(func.arguments, arg_field)
358
+ )
359
+ self.positional_args = tuple(positional_args)
360
+
361
+ keyword_args: List[LazyArgument] = []
362
+ for arg_field in [
363
+ "pre_tensor_options_kwarg_only",
364
+ "tensor_options",
365
+ "post_tensor_options_kwarg_only",
366
+ "out",
367
+ ]:
368
+ curr_args = getattr(func.arguments, arg_field)
369
+ if curr_args is not None:
370
+ if isinstance(curr_args, TensorOptionsArguments):
371
+ curr_args = curr_args.all()
372
+ for arg in curr_args:
373
+ if isGeneratorType(arg.type):
374
+ assert (
375
+ self.generator_arg is None
376
+ ), "We expect there is only one generator arg"
377
+ self.generator_arg = NamedCType(
378
+ arg.name, arg.type # type:ignore[arg-type]
379
+ )
380
+ keyword_args.extend(
381
+ LazyArgument(arg, self.properties, symint=symint)
382
+ for arg in curr_args
383
+ )
384
+ self.keyword_args = tuple(keyword_args)
385
+ self.name = func.name
386
+ self.returns = func.returns
387
+
388
+ @property
389
+ def node_name(self) -> str:
390
+ """
391
+ Return camel-case version of op in node.
392
+
393
+ Note: This function also appends any `overload_name` in the operation.
394
+ For example, if the op is `bitwise_and.Tensor`, the returned name
395
+ will be `BitwiseAndTensor`.
396
+ """
397
+ op_name = f"{self.name.name}_{self.name.overload_name}".lower()
398
+ return "".join(word.capitalize() or "" for word in op_name.split("_"))
399
+
400
+ @property
401
+ def aten_name(self) -> str:
402
+ return str(self.name.name)
403
+
404
+ @property
405
+ def base_name(self) -> str:
406
+ return f"{self.name.name.base}"
407
+
408
+ def filtered_args(
409
+ self,
410
+ positional: bool = True,
411
+ keyword: bool = True,
412
+ values: bool = True,
413
+ scalars: bool = True,
414
+ generator: bool = True,
415
+ ) -> List[LazyArgument]:
416
+ # This function maintains the sorted order of arguments but provides different filtered views.
417
+ # Some parts of the code care about kwargs vs args (TS lowerings),
418
+ # other parts care about whether they need to wrap the arg in a lazy value or leave it alone.
419
+ # Generators are special cased, as they are needed for fallback/shape-inference but not supported
420
+ # in TS lowerings and therefore also omitted from lazy IR.
421
+ args: List[LazyArgument] = []
422
+ if positional:
423
+ args.extend(self.positional_args)
424
+ if keyword:
425
+ args.extend(self.keyword_args)
426
+
427
+ if values and scalars and generator:
428
+ return args
429
+ elif values and scalars:
430
+ return [a for a in args if not a.is_generator]
431
+ elif values:
432
+ return [a for a in args if a.is_lazy_value]
433
+ elif scalars:
434
+ return [
435
+ a
436
+ for a in args
437
+ if not a.is_lazy_value and (generator or not a.is_generator)
438
+ ]
439
+
440
+ return []
441
+
442
+ @property
443
+ def positional_values(self) -> List[LazyArgument]:
444
+ return self.filtered_args(
445
+ positional=True, keyword=False, values=True, scalars=False
446
+ )
447
+
448
+ @property
449
+ def positional_scalars(self) -> List[LazyArgument]:
450
+ return self.filtered_args(
451
+ positional=True, keyword=False, values=False, scalars=True
452
+ )
453
+
454
+ @property
455
+ def keyword_values(self) -> List[LazyArgument]:
456
+ return self.filtered_args(
457
+ positional=False, keyword=True, values=True, scalars=False
458
+ )
459
+
460
+ @property
461
+ def keyword_scalars(self) -> List[LazyArgument]:
462
+ return self.filtered_args(
463
+ positional=False, keyword=True, values=False, scalars=True
464
+ )
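
A small sketch (assuming torchgen is importable from this environment) of the mutual exclusivity implemented by LazyIrProperties.__setattr__ above: enabling one property of a group clears the others in that group, while other groups are unaffected.

    from torchgen.api.lazy import LazyIrProperties

    props = LazyIrProperties("ShapePrecompute", "Lower", "CanBeReused")
    assert props.ShapePrecompute and not props.ShapeCache

    props.ShapeCache = True          # switch the shape group over to ShapeCache
    assert props.ShapeCache and not props.ShapePrecompute

    assert props.Lower and props.CanBeReused  # other groups keep their settings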
env-llmeval/lib/python3.10/site-packages/torchgen/code_template.py ADDED
@@ -0,0 +1,96 @@
1
+ import re
2
+ from typing import Mapping, Match, Optional, Sequence
3
+
4
+ # Match $identifier or ${identifier} and replace it with the value found in env.
5
+ # If the identifier sits at the start of a line (preceded only by whitespace)
6
+ # and its value is a list, it is treated as
7
+ # block substitution: each element of the list is emitted
8
+ # on its own line, indented to the identifier's depth.
9
+ # If the identifier is on a line starting with non-whitespace and its value is a list,
10
+ # the elements are comma separated; ${,foo} inserts a comma before the list
11
+ # if the list is not empty, and ${foo,} inserts one after.
12
+
13
+
14
+ class CodeTemplate:
15
+ substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
16
+ substitution = re.compile(substitution_str, re.MULTILINE)
17
+
18
+ pattern: str
19
+ filename: str
20
+
21
+ @staticmethod
22
+ def from_file(filename: str) -> "CodeTemplate":
23
+ with open(filename) as f:
24
+ return CodeTemplate(f.read(), filename)
25
+
26
+ def __init__(self, pattern: str, filename: str = "") -> None:
27
+ self.pattern = pattern
28
+ self.filename = filename
29
+
30
+ def substitute(
31
+ self, env: Optional[Mapping[str, object]] = None, **kwargs: object
32
+ ) -> str:
33
+ if env is None:
34
+ env = {}
35
+
36
+ def lookup(v: str) -> object:
37
+ assert env is not None
38
+ return kwargs[v] if v in kwargs else env[v]
39
+
40
+ def indent_lines(indent: str, v: Sequence[object]) -> str:
41
+ return "".join(
42
+ [indent + l + "\n" for e in v for l in str(e).splitlines()]
43
+ ).rstrip()
44
+
45
+ def replace(match: Match[str]) -> str:
46
+ indent = match.group(1)
47
+ key = match.group(2)
48
+ comma_before = ""
49
+ comma_after = ""
50
+ if key[0] == "{":
51
+ key = key[1:-1]
52
+ if key[0] == ",":
53
+ comma_before = ", "
54
+ key = key[1:]
55
+ if key[-1] == ",":
56
+ comma_after = ", "
57
+ key = key[:-1]
58
+ v = lookup(key)
59
+ if indent is not None:
60
+ if not isinstance(v, list):
61
+ v = [v]
62
+ return indent_lines(indent, v)
63
+ elif isinstance(v, list):
64
+ middle = ", ".join([str(x) for x in v])
65
+ if len(v) == 0:
66
+ return middle
67
+ return comma_before + middle + comma_after
68
+ else:
69
+ return str(v)
70
+
71
+ return self.substitution.sub(replace, self.pattern)
72
+
73
+
74
+ if __name__ == "__main__":
75
+ c = CodeTemplate(
76
+ """\
77
+ int foo($args) {
78
+
79
+ $bar
80
+ $bar
81
+ $a+$b
82
+ }
83
+ int commatest(int a${,stuff})
84
+ int notest(int a${,empty,})
85
+ """
86
+ )
87
+ print(
88
+ c.substitute(
89
+ args=["hi", 8],
90
+ bar=["what", 7],
91
+ a=3,
92
+ b=4,
93
+ stuff=["things...", "others"],
94
+ empty=[],
95
+ )
96
+ )
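
A minimal sketch of the substitution rules described in the comment at the top of code_template.py: a list substituted on an indented line becomes one element per line, while a list substituted mid-line is comma separated. The template text itself is invented for the example.

    from torchgen.code_template import CodeTemplate

    template = CodeTemplate("""\
    void $name($args) {
        $body
    }
    """)

    print(
        template.substitute(
            name="copy",
            args=["const Tensor& src", "Tensor& dst"],          # mid-line list -> comma separated
            body=["// element-wise copy", "dst.copy_(src);"],   # indented list -> one line each
        )
    )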
env-llmeval/lib/python3.10/site-packages/torchgen/context.py ADDED
@@ -0,0 +1,128 @@
1
+ import contextlib
2
+
3
+ import functools
4
+ from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union
5
+
6
+ import torchgen.local as local
7
+ from torchgen.model import (
8
+ BackendIndex,
9
+ DispatchKey,
10
+ NativeFunction,
11
+ NativeFunctionsGroup,
12
+ NativeFunctionsViewGroup,
13
+ )
14
+ from torchgen.utils import context, S, T
15
+
16
+ # Helper functions for defining generators on things in the model
17
+
18
+ F = TypeVar(
19
+ "F",
20
+ NativeFunction,
21
+ NativeFunctionsGroup,
22
+ NativeFunctionsViewGroup,
23
+ Union[NativeFunction, NativeFunctionsGroup],
24
+ Union[NativeFunction, NativeFunctionsViewGroup],
25
+ )
26
+
27
+ F2 = TypeVar(
28
+ "F2",
29
+ NativeFunction,
30
+ NativeFunctionsGroup,
31
+ Optional[NativeFunction],
32
+ bool,
33
+ str,
34
+ )
35
+
36
+ F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
37
+
38
+
39
+ @contextlib.contextmanager
40
+ def native_function_manager(
41
+ g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup, NativeFunction]
42
+ ) -> Iterator[None]:
43
+ if isinstance(g, NativeFunctionsGroup):
44
+ # By default, we associate all errors with structured native functions
45
+ # with the out variant. In some cases, it might be better to have
46
+ # a more specific place to hang things; if so, use
47
+ # native_function_manager again on the inside
48
+ f = g.out
49
+ elif isinstance(g, NativeFunctionsViewGroup):
50
+ # We associate errors with the view operator
51
+ f = g.view
52
+ else:
53
+ f = g
54
+ with context(lambda: f"in native_functions.yaml line {f.loc}:\n {f.func}"):
55
+ with local.parametrize(
56
+ use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
57
+ use_ilistref_for_tensor_lists=f.part_of_structured_group,
58
+ ):
59
+ yield
60
+
61
+
62
+ # Given a function that operates on NativeFunction, wrap it into a new function
63
+ # that sets some appropriate context managers for that native function.
64
+ # YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
65
+ # (you will get an error if we try to access the local variables without having
66
+ # set them).
67
+ def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
68
+ @functools.wraps(func)
69
+ def wrapper(f: F) -> T:
70
+ with native_function_manager(f):
71
+ return func(f)
72
+
73
+ return wrapper
74
+
75
+
76
+ def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
77
+ @functools.wraps(func)
78
+ def wrapper(f: F, f2: F2) -> T:
79
+ # The first native_function is assumed to be the one with the appropriate context.
80
+ with native_function_manager(f):
81
+ return func(f, f2)
82
+
83
+ return wrapper
84
+
85
+
86
+ def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
87
+ @functools.wraps(func)
88
+ def wrapper(slf: S, f: F) -> T:
89
+ with native_function_manager(f):
90
+ return func(slf, f)
91
+
92
+ return wrapper
93
+
94
+
95
+ def method_with_nested_native_function(
96
+ func: Callable[[S, F3], T]
97
+ ) -> Callable[[S, F3], T]:
98
+ @functools.wraps(func)
99
+ def wrapper(slf: S, f: F3) -> T:
100
+ with native_function_manager(f[0]):
101
+ return func(slf, f)
102
+
103
+ return wrapper
104
+
105
+
106
+ # Convenience decorator for functions that explicitly take in a BackendIndex,
107
+ # instead of indirectly taking one in as a closure
108
+ def with_native_function_and_index(
109
+ func: Callable[[F, BackendIndex], T]
110
+ ) -> Callable[[F, BackendIndex], T]:
111
+ @functools.wraps(func)
112
+ def wrapper(f: F, backend_index: BackendIndex) -> T:
113
+ with native_function_manager(f):
114
+ return func(f, backend_index)
115
+
116
+ return wrapper
117
+
118
+
119
+ # Convenience decorator for functions that explicitly take in a Dict of BackendIndices
120
+ def with_native_function_and_indices(
121
+ func: Callable[[F, Dict[DispatchKey, BackendIndex]], T]
122
+ ) -> Callable[[F, Dict[DispatchKey, BackendIndex]], T]:
123
+ @functools.wraps(func)
124
+ def wrapper(f: F, backend_indices: Dict[DispatchKey, BackendIndex]) -> T:
125
+ with native_function_manager(f):
126
+ return func(f, backend_indices)
127
+
128
+ return wrapper
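
As a hedged sketch of how the decorators above are used elsewhere in torchgen: wrapping a per-NativeFunction code generator with with_native_function makes any exception raised inside it report the offending native_functions.yaml location. The generator body below is illustrative only.

    from torchgen.context import with_native_function
    from torchgen.model import NativeFunction

    @with_native_function
    def compute_registration(f: NativeFunction) -> str:
        # Errors raised here are attributed to f's native_functions.yaml entry
        # by native_function_manager.
        return f"m.def({str(f.func)!r});"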
env-llmeval/lib/python3.10/site-packages/torchgen/gen.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py ADDED
@@ -0,0 +1,609 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ import re
5
+ from collections import Counter, defaultdict, namedtuple
6
+ from typing import Dict, List, Optional, Sequence, Set, Union
7
+
8
+ import yaml
9
+
10
+ import torchgen.api.dispatcher as dispatcher
11
+ import torchgen.dest as dest
12
+ from torchgen.api.types import DispatcherSignature
13
+ from torchgen.code_template import CodeTemplate
14
+ from torchgen.context import native_function_manager
15
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
16
+ from torchgen.model import (
17
+ BackendIndex,
18
+ BackendMetadata,
19
+ DispatchKey,
20
+ NativeFunction,
21
+ NativeFunctionsGroup,
22
+ OperatorName,
23
+ )
24
+ from torchgen.selective_build.selector import SelectiveBuilder
25
+ from torchgen.utils import concatMap, context, FileManager, NamespaceHelper, Target
26
+ from torchgen.yaml_utils import YamlLoader
27
+
28
+
29
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
30
+ # Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping)
31
+ ParsedExternalYaml = namedtuple(
32
+ "ParsedExternalYaml",
33
+ ["backend_key", "autograd_key", "class_name", "cpp_namespace", "backend_indices"],
34
+ )
35
+
36
+
37
+ def parse_backend_yaml(
38
+ backend_yaml_path: str,
39
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
40
+ backend_indices: Dict[DispatchKey, BackendIndex],
41
+ ) -> ParsedExternalYaml:
42
+ native_functions_map: Dict[OperatorName, NativeFunction] = {
43
+ f.func.name: f
44
+ for f in concatMap(
45
+ lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
46
+ grouped_native_functions,
47
+ )
48
+ }
49
+
50
+ with open(backend_yaml_path) as f:
51
+ yaml_values = yaml.load(f, Loader=YamlLoader)
52
+ assert isinstance(yaml_values, dict)
53
+
54
+ valid_keys = [
55
+ "backend",
56
+ "class_name",
57
+ "cpp_namespace",
58
+ "extra_headers",
59
+ "supported",
60
+ "autograd",
61
+ "full_codegen",
62
+ "non_native",
63
+ "ir_gen",
64
+ "symint",
65
+ ]
66
+
67
+ backend = yaml_values.pop("backend", None)
68
+ assert backend is not None, 'You must provide a value for "backend"'
69
+
70
+ class_name = yaml_values.pop("class_name", None)
71
+
72
+ cpp_namespace = yaml_values.pop("cpp_namespace", None)
73
+ assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'
74
+
75
+ # Mostly just defaulting to false to stick with LazyTensor convention.
76
+ use_out_as_primary = yaml_values.pop("use_out_as_primary", False)
77
+ assert isinstance(
78
+ use_out_as_primary, bool
79
+ ), f"You must provide either True or False for use_out_as_primary. Provided: {use_out_as_primary}"
80
+
81
+ use_device_guard = yaml_values.pop("device_guard", False)
82
+ assert isinstance(
83
+ use_device_guard, bool
84
+ ), f"You must provide either True or False for device_guard. Provided: {use_device_guard}"
85
+
86
+ supported = yaml_values.pop("supported", [])
87
+ if supported is None:
88
+ supported = [] # Allow an empty list of supported ops
89
+ assert isinstance(
90
+ supported, list
91
+ ), f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'
92
+
93
+ symint = yaml_values.pop("symint", [])
94
+ if symint is None:
95
+ symint = [] # Allow an empty list of symint ops
96
+ assert isinstance(
97
+ symint, list
98
+ ), f'expected "symint" to be a list, but got: {supported} (of type {type(supported)})'
99
+ symint_set = set(symint)
100
+
101
+ supported_autograd = yaml_values.pop("autograd", [])
102
+ assert isinstance(
103
+ supported_autograd, list
104
+ ), f'expected "autograd" to be a list, but got: {supported_autograd}'
105
+
106
+ # full_codegen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
107
+ full_codegen = yaml_values.pop("full_codegen", [])
108
+ supported.extend(full_codegen)
109
+
110
+ # non_native is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
111
+ non_native = yaml_values.pop("non_native", {})
112
+
113
+ # ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
114
+ _ = yaml_values.pop("ir_gen", {})
115
+
116
+ assert (
117
+ len(yaml_values.keys()) == 0
118
+ ), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
119
+ Only the following keys are supported: {", ".join(valid_keys)}'
120
+
121
+ def create_backend_index(
122
+ backend_ops: List[str],
123
+ symint_ops: Set[str],
124
+ dispatch_key: DispatchKey,
125
+ *,
126
+ use_out_as_primary: bool,
127
+ use_device_guard: bool,
128
+ ) -> BackendIndex:
129
+ metadata: Dict[OperatorName, BackendMetadata] = {}
130
+ for op in backend_ops:
131
+ op_name = OperatorName.parse(op)
132
+ assert (
133
+ op_name in native_functions_map
134
+ ), f"Found an invalid operator name: {op_name}"
135
+ # See Note [External Backends Follow Dispatcher API]
136
+ kernel_name = dispatcher.name(native_functions_map[op_name].func)
137
+ if op in symint_ops:
138
+ kernel_name += "_symint"
139
+ # TODO: allow structured external backends later.
140
+ m = BackendMetadata(
141
+ kernel=kernel_name, structured=False, cpp_namespace=cpp_namespace
142
+ )
143
+ metadata[op_name] = m
144
+ return BackendIndex(
145
+ dispatch_key=dispatch_key,
146
+ use_out_as_primary=use_out_as_primary,
147
+ external=True,
148
+ device_guard=use_device_guard,
149
+ index=metadata,
150
+ )
151
+
152
+ backend_key: Optional[DispatchKey] = None
153
+ if len(supported) > 0:
154
+ with context(
155
+ lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'
156
+ ):
157
+ backend_key = DispatchKey.parse(backend)
158
+
159
+ backend_idx = create_backend_index(
160
+ supported,
161
+ symint_set,
162
+ backend_key,
163
+ use_out_as_primary=use_out_as_primary,
164
+ use_device_guard=use_device_guard,
165
+ )
166
+ assert backend_key not in backend_indices
167
+ backend_indices[backend_key] = backend_idx
168
+
169
+ autograd_key: Optional[DispatchKey] = None
170
+ if len(supported_autograd) > 0:
171
+ with context(
172
+ lambda: f'The "autograd" key was specified, which indicates that you would like to override \
173
+ the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'
174
+ ):
175
+ autograd_key = DispatchKey.parse(f"Autograd{backend}")
176
+
177
+ autograd_idx = create_backend_index(
178
+ supported_autograd,
179
+ symint_set,
180
+ autograd_key,
181
+ use_out_as_primary=use_out_as_primary,
182
+ use_device_guard=use_device_guard,
183
+ )
184
+ assert autograd_key not in backend_indices
185
+ backend_indices[autograd_key] = autograd_idx
186
+
187
+ for g in grouped_native_functions:
188
+ if isinstance(g, NativeFunction):
189
+ forward_kernels = (
190
+ []
191
+ if backend_key is None
192
+ else [
193
+ m
194
+ for m in [backend_indices[backend_key].get_kernel(g)]
195
+ if m is not None
196
+ ]
197
+ )
198
+ backward_kernels = (
199
+ []
200
+ if autograd_key is None
201
+ else [
202
+ m
203
+ for m in [backend_indices[autograd_key].get_kernel(g)]
204
+ if m is not None
205
+ ]
206
+ )
207
+ else:
208
+ forward_kernels = (
209
+ []
210
+ if backend_key is None
211
+ else [
212
+ m
213
+ for m in [
214
+ backend_indices[backend_key].get_kernel(f)
215
+ for f in g.functions()
216
+ ]
217
+ if m is not None
218
+ ]
219
+ )
220
+ backward_kernels = (
221
+ []
222
+ if autograd_key is None
223
+ else [
224
+ m
225
+ for m in [
226
+ backend_indices[autograd_key].get_kernel(f)
227
+ for f in g.functions()
228
+ ]
229
+ if m is not None
230
+ ]
231
+ )
232
+
233
+ forward_kernels = [f for f in forward_kernels if f is not None]
234
+ backward_kernels = [f for f in backward_kernels if f is not None]
235
+ assert (
236
+ len(forward_kernels) == 0 or len(backward_kernels) == 0
237
+ ), f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
238
+ autograd key. They cannot be mixed and matched. If this is something you need, feel free to create an issue! \
239
+ {forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'
240
+
241
+ return ParsedExternalYaml(
242
+ backend_key, autograd_key, class_name, cpp_namespace, backend_indices
243
+ )
244
+
245
+
246
+ def error_on_missing_kernels(
247
+ native_functions: Sequence[NativeFunction],
248
+ backend_indices: Dict[DispatchKey, BackendIndex],
249
+ backend_key: DispatchKey,
250
+ autograd_key: Optional[DispatchKey],
251
+ class_name: str,
252
+ kernel_defn_file_path: str,
253
+ full_codegen: Optional[List[OperatorName]] = None,
254
+ ) -> None:
255
+ try:
256
+ with open(kernel_defn_file_path) as f:
257
+ backend_defns = f.read()
258
+ except OSError as e:
259
+ raise AssertionError(
260
+ f"Unable to read from the specified impl_path file: {kernel_defn_file_path}"
261
+ ) from e
262
+
263
+ if full_codegen is None:
264
+ full_codegen = []
265
+
266
+ indices = [backend_indices[backend_key].index] + (
267
+ [] if autograd_key is None else [backend_indices[autograd_key].index]
268
+ )
269
+ # Quick mapping from each OperatorName used by the external backend
270
+ # to its backend kernel name
271
+ expected_backend_op_names: Dict[OperatorName, str] = dict(
272
+ list(
273
+ concatMap(
274
+ lambda index: [
275
+ (op_name, metadata.kernel) for op_name, metadata in index.items()
276
+ ],
277
+ indices,
278
+ )
279
+ )
280
+ )
281
+ expected_backend_native_funcs: List[NativeFunction] = [
282
+ f
283
+ for f in native_functions
284
+ if f.func.name in expected_backend_op_names.keys()
285
+ and f.func.name not in full_codegen
286
+ ]
287
+ expected_backend_kernel_name_counts: Dict[str, List[NativeFunction]] = defaultdict(
288
+ list
289
+ )
290
+ for native_f in expected_backend_native_funcs:
291
+ expected_backend_kernel_name_counts[
292
+ expected_backend_op_names[native_f.func.name]
293
+ ].append(native_f)
294
+
295
+ # This just looks for lines containing "foo(", and assumes that the kernel foo has been implemented.
296
+ # It might cause false negatives (we won't catch all cases), but that's ok - if we catch a missing kernel
297
+ # here, then we get a nicer error message. If we miss it, you get a linker error.
298
+ kernel_defn_regex = rf"(.*){class_name}::\s*([\w\d]*)\("
299
+ actual_backend_kernel_name_counts = Counter(
300
+ # A bit unwieldy (this could probably be moved into regex),
301
+ # but we don't want to include kernel names that come from function calls,
302
+ # like "return torch_xla::XLANativeFunctions::empty_strided_symint(...)".
303
+ # Easy check is to ignore any lines with colons before the class name.
304
+ [
305
+ y
306
+ for (x, y) in re.findall(kernel_defn_regex, backend_defns)
307
+ if not x.endswith(":")
308
+ ]
309
+ )
310
+
311
+ missing_kernels_err_msg = ""
312
+ for expected_name, funcs in expected_backend_kernel_name_counts.items():
313
+ expected_overload_count = len(funcs)
314
+ actual_overload_count = actual_backend_kernel_name_counts[expected_name]
315
+ if expected_overload_count != actual_overload_count:
316
+
317
+ def create_decl(f: NativeFunction) -> str:
318
+ with native_function_manager(f):
319
+ return DispatcherSignature.from_schema(f.func).decl()
320
+
321
+ expected_schemas_str = "\n".join([create_decl(f) for f in funcs])
322
+ missing_kernels_err_msg += f"""
323
+ {class_name} is missing a kernel definition for {expected_name}. We found {actual_overload_count} kernel(s) with that name,
324
+ but expected {expected_overload_count} kernel(s). The expected function schemas for the missing operator are:
325
+ {expected_schemas_str}
326
+
327
+ """
328
+ assert missing_kernels_err_msg == "", missing_kernels_err_msg
329
+
330
+
331
+ def main() -> None:
332
+ parser = argparse.ArgumentParser(description="Generate backend stub files")
333
+ parser.add_argument(
334
+ "-s",
335
+ "--source-yaml",
336
+ "--source_yaml",
337
+ help="path to source yaml file containing operator external definitions",
338
+ )
339
+ parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
340
+ parser.add_argument(
341
+ "--dry-run", "--dry_run", type=bool, default=False, help="output directory"
342
+ )
343
+ parser.add_argument(
344
+ "--impl-path",
345
+ "--impl_path",
346
+ type=str,
347
+ default=None,
348
+ help="path to the source C++ file containing kernel definitions",
349
+ )
350
+ options = parser.parse_args()
351
+
352
+ run(options.source_yaml, options.output_dir, options.dry_run, options.impl_path)
353
+
354
+
355
+ def gen_dispatchkey_nativefunc_headers(
356
+ fm: FileManager,
357
+ class_name: str,
358
+ cpp_namespace: str,
359
+ backend_indices: Dict[DispatchKey, BackendIndex],
360
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
361
+ backend_dispatch_key: DispatchKey,
362
+ autograd_dispatch_key: Optional[DispatchKey],
363
+ backend_name: str = "",
364
+ ) -> None:
365
+ assert class_name is not None
366
+ generated_comment = (
367
+ "Autogenerated file by gen_backend_stubs.py. Do not edit directly!"
368
+ )
369
+
370
+ # Convert to a set first to remove duplicate kernel names.
371
+ # Backends are allowed to repeat kernel names; only generate the declaration once!
372
+ # Sort for deterministic output.
373
+ backend_declarations = sorted(
374
+ set(
375
+ concatMap(
376
+ lambda f: dest.compute_native_function_declaration(
377
+ f, backend_indices[backend_dispatch_key]
378
+ ),
379
+ grouped_native_functions,
380
+ )
381
+ )
382
+ )
383
+ autograd_declarations = sorted(
384
+ set(
385
+ concatMap(
386
+ lambda f: []
387
+ if autograd_dispatch_key is None
388
+ else dest.compute_native_function_declaration(
389
+ f, backend_indices[autograd_dispatch_key]
390
+ ),
391
+ grouped_native_functions,
392
+ )
393
+ )
394
+ )
395
+
396
+ ns_helper = NamespaceHelper(cpp_namespace)
397
+ fm.write_with_template(
398
+ f"{backend_dispatch_key}NativeFunctions.h",
399
+ "DispatchKeyNativeFunctions.h",
400
+ lambda: {
401
+ "generated_comment": generated_comment,
402
+ "namespace_prologue": ns_helper.prologue,
403
+ "class_name": class_name,
404
+ "namespace_epilogue": ns_helper.epilogue,
405
+ "dispatch_declarations": backend_declarations + autograd_declarations,
406
+ "BackendName": backend_name,
407
+ "DispatchKey": backend_dispatch_key,
408
+ },
409
+ )
410
+
411
+
412
+ def gen_dispatcher_registrations(
413
+ fm: FileManager,
414
+ output_dir: str,
415
+ class_name: str,
416
+ backend_indices: Dict[DispatchKey, BackendIndex],
417
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
418
+ backend_dispatch_key: DispatchKey,
419
+ dispatch_key: DispatchKey,
420
+ selector: "SelectiveBuilder",
421
+ # build_in_tree is true for lazy TS backend and affects include paths, not used for external backends
422
+ build_in_tree: bool = False,
423
+ per_operator_headers: bool = False,
424
+ backend_name: str = "",
425
+ eager_registration: bool = True,
426
+ ) -> None:
427
+ headers = [
428
+ f"{output_dir}/{backend_dispatch_key}NativeFunctions.h",
429
+ ]
430
+ if build_in_tree:
431
+ external_backend_headers_str = "\n".join(f"#include <{h}>" for h in headers)
432
+ else:
433
+ external_backend_headers_str = "\n".join(f'#include "{h}"' for h in headers)
434
+
435
+ assert class_name is not None
436
+ backend_index = backend_indices[dispatch_key]
437
+
438
+ dispatch_registrations_body = list(
439
+ concatMap(
440
+ dest.RegisterDispatchKey(
441
+ backend_index,
442
+ Target.REGISTRATION,
443
+ selector,
444
+ rocm=False,
445
+ symint=True,
446
+ class_method_name=f"{class_name}",
447
+ skip_dispatcher_op_registration=False,
448
+ ),
449
+ grouped_native_functions,
450
+ )
451
+ )
452
+ newline = "\n"
453
+ ns_helper = NamespaceHelper(namespace_str="at")
454
+ deferred_dispatch_registrations = ""
455
+ static_init_dispatch_registrations = ""
456
+ if eager_registration:
457
+ static_template = CodeTemplate(
458
+ """\
459
+ TORCH_LIBRARY_IMPL(aten, $dispatch_key, m) {
460
+ $dispatch_registrations_body
461
+ };"""
462
+ )
463
+ static_init_dispatch_registrations = static_template.substitute(
464
+ dispatch_key=dispatch_key,
465
+ dispatch_registrations_body=dispatch_registrations_body,
466
+ )
467
+ else:
468
+ deferred_template = CodeTemplate(
469
+ """\
470
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions();
471
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions() {
472
+ static auto m = MAKE_TORCH_LIBRARY_IMPL(aten, $dispatch_key);
473
+ $dispatch_registrations_body
474
+ }"""
475
+ )
476
+ deferred_dispatch_registrations = deferred_template.substitute(
477
+ backend_name=backend_name,
478
+ dispatch_key=dispatch_key,
479
+ dispatch_registrations_body=dispatch_registrations_body,
480
+ )
481
+
482
+ fm.write_with_template(
483
+ f"Register{dispatch_key}.cpp",
484
+ "RegisterDispatchKey.cpp",
485
+ lambda: {
486
+ "extra_cuda_headers": "",
487
+ "external_backend_headers": external_backend_headers_str,
488
+ "ops_headers": "#include <ATen/Functions.h>"
489
+ if not per_operator_headers
490
+ else "",
491
+ "DispatchKey": dispatch_key,
492
+ "dispatch_namespace": dispatch_key.lower(),
493
+ "dispatch_headers": dest.gen_registration_headers(
494
+ backend_index, per_operator_headers=per_operator_headers, rocm=False
495
+ ),
496
+ "dispatch_definitions": fm.substitute_with_template(
497
+ "RegisterDispatchDefinitions.ini",
498
+ lambda: {
499
+ "ns_prologue": ns_helper.prologue,
500
+ "ns_epilogue": ns_helper.epilogue,
501
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
502
+ "deferred_dispatch_registrations": deferred_dispatch_registrations,
503
+ "dispatch_helpers": dest.gen_registration_helpers(backend_index),
504
+ "dispatch_namespace": dispatch_key.lower(),
505
+ "dispatch_namespaced_definitions": "",
506
+ "dispatch_anonymous_definitions": list(
507
+ concatMap(
508
+ dest.RegisterDispatchKey(
509
+ backend_index,
510
+ Target.ANONYMOUS_DEFINITION,
511
+ selector,
512
+ rocm=False,
513
+ symint=True,
514
+ class_method_name=f"{class_name}",
515
+ skip_dispatcher_op_registration=False,
516
+ ),
517
+ grouped_native_functions,
518
+ )
519
+ ),
520
+ },
521
+ ).split(newline),
522
+ },
523
+ )
524
+
525
+
526
+ def run(
527
+ source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str] = None
528
+ ) -> None:
529
+ # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
530
+ pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
531
+ template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")
532
+
533
+ def make_file_manager(install_dir: str) -> FileManager:
534
+ return FileManager(
535
+ install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
536
+ )
537
+
538
+ fm = make_file_manager(output_dir)
539
+
540
+ native_yaml_path = os.path.join(
541
+ pytorch_root, "aten/src/ATen/native/native_functions.yaml"
542
+ )
543
+ tags_yaml_path = os.path.join(pytorch_root, "aten/src/ATen/native/tags.yaml")
544
+ parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
545
+ native_functions, backend_indices = (
546
+ parsed_yaml.native_functions,
547
+ parsed_yaml.backend_indices,
548
+ )
549
+ grouped_native_functions = get_grouped_native_functions(native_functions)
550
+ parsed_backend_yaml = parse_backend_yaml(
551
+ source_yaml, grouped_native_functions, backend_indices
552
+ )
553
+ backend_key = parsed_backend_yaml.backend_key
554
+ autograd_key = parsed_backend_yaml.autograd_key
555
+ cpp_namespace = parsed_backend_yaml.cpp_namespace
556
+ class_name = parsed_backend_yaml.class_name
557
+ backend_indices = parsed_backend_yaml.backend_indices
558
+
559
+ selector = SelectiveBuilder.get_nop_selector()
560
+
561
+ if backend_key is None:
562
+ # This could be useful if a backend wants to quickly set up a noop yaml file but doesn't have any kernels ready yet.
563
+ return
564
+
565
+ if class_name is None:
566
+ # class_name is an optional argument to backend yaml file.
567
+ # if specified it allows an external backend to override
568
+ # the name of the class that all generated kernel definitions live under.
569
+ # if not specified, its value is given as native_function_class_name.
570
+ class_name = backend_indices[backend_key].native_function_class_name()
571
+ assert class_name is not None
572
+
573
+ if impl_path is not None:
574
+ error_on_missing_kernels(
575
+ native_functions,
576
+ backend_indices,
577
+ backend_key,
578
+ autograd_key,
579
+ class_name,
580
+ impl_path,
581
+ )
582
+
583
+ gen_dispatchkey_nativefunc_headers(
584
+ fm,
585
+ class_name,
586
+ cpp_namespace,
587
+ backend_indices,
588
+ grouped_native_functions,
589
+ backend_key,
590
+ autograd_key,
591
+ )
592
+
593
+ for dispatch_key in (
594
+ [backend_key] if autograd_key is None else [backend_key, autograd_key]
595
+ ):
596
+ gen_dispatcher_registrations(
597
+ fm,
598
+ output_dir,
599
+ class_name,
600
+ backend_indices,
601
+ grouped_native_functions,
602
+ backend_key,
603
+ dispatch_key,
604
+ selector,
605
+ )
606
+
607
+
608
+ if __name__ == "__main__":
609
+ main()
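For orientation, a hedged sketch (not part of the committed diff) of driving the `run()` entry point defined above programmatically. The backend name `XLA`, the `torch_xla` namespace, and the single supported op are illustrative placeholders, and the call assumes a PyTorch source checkout so that `run()` can locate `native_functions.yaml` relative to this file.

```python
# Hedged sketch: feed a minimal backend YAML into gen_backend_stubs.run().
import tempfile
import textwrap

from torchgen.gen_backend_stubs import run

backend_yaml = textwrap.dedent(
    """\
    backend: XLA
    cpp_namespace: torch_xla
    supported:
    - abs
    """
)

with tempfile.TemporaryDirectory() as out_dir, tempfile.NamedTemporaryFile(
    "w", suffix=".yaml"
) as src:
    src.write(backend_yaml)
    src.flush()
    # dry_run=True asks the FileManager not to write the generated files.
    run(src.name, out_dir, dry_run=True, impl_path=None)
```

The same generation can be triggered from the command line with `python -m torchgen.gen_backend_stubs -s backend.yaml -o out/`, using the flags defined in `main()` above.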
env-llmeval/lib/python3.10/site-packages/torchgen/gen_executorch.py ADDED
@@ -0,0 +1,978 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ from collections import defaultdict
5
+ from dataclasses import dataclass
6
+ from typing import Any, Callable, Dict, List, Optional, Sequence, TextIO, Tuple, Union
7
+
8
+ import yaml
9
+
10
+ # Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
11
+ from torchgen import dest
12
+ from torchgen.api import cpp as aten_cpp
13
+ from torchgen.api.types import CppSignature, CppSignatureGroup, CType, NamedCType
14
+ from torchgen.context import (
15
+ method_with_native_function,
16
+ method_with_nested_native_function,
17
+ with_native_function_and_index,
18
+ )
19
+ from torchgen.executorch.api import et_cpp
20
+ from torchgen.executorch.api.custom_ops import (
21
+ ComputeNativeFunctionStub,
22
+ gen_custom_ops_registration,
23
+ )
24
+ from torchgen.executorch.api.types import contextArg, ExecutorchCppSignature
25
+ from torchgen.executorch.api.unboxing import Unboxing
26
+ from torchgen.executorch.model import ETKernelIndex, ETKernelKey, ETParsedYaml
27
+ from torchgen.executorch.parse import ET_FIELDS, parse_et_yaml, parse_et_yaml_struct
28
+ from torchgen.gen import (
29
+ get_custom_build_selector,
30
+ get_native_function_declarations,
31
+ get_native_function_declarations_from_ns_grouped_kernels,
32
+ get_native_function_schema_registrations,
33
+ LineLoader,
34
+ parse_native_yaml,
35
+ )
36
+ from torchgen.model import (
37
+ BackendIndex,
38
+ BackendMetadata,
39
+ DEFAULT_KERNEL_NAMESPACE,
40
+ DispatchKey,
41
+ FunctionSchema,
42
+ Location,
43
+ NativeFunction,
44
+ NativeFunctionsGroup,
45
+ OperatorName,
46
+ Variant,
47
+ )
48
+ from torchgen.selective_build.selector import SelectiveBuilder
49
+ from torchgen.utils import (
50
+ context,
51
+ FileManager,
52
+ make_file_manager,
53
+ mapMaybe,
54
+ NamespaceHelper,
55
+ )
56
+
57
+
58
+ def _sig_decl_wrapper(sig: Union[CppSignature, ExecutorchCppSignature]) -> str:
59
+ """
60
+ A wrapper function to basically get `sig.decl(include_context=True)`.
61
+ For ATen kernel, the codegen has no idea about ET contextArg, so we
62
+ use this wrapper to add it.
63
+ """
64
+ if isinstance(sig, ExecutorchCppSignature):
65
+ return sig.decl()
66
+
67
+ returns_type = aten_cpp.returns_type(sig.func.returns).cpp_type()
68
+ cpp_args = [a.decl() for a in sig.arguments()]
69
+ cpp_args_str = ", ".join([contextArg.decl()] + cpp_args)
70
+ sig_decl = f"{returns_type} {sig.name()}({cpp_args_str})"
71
+ return sig_decl
72
+
73
+
74
+ def static_dispatch(
75
+ sig: Union[CppSignature, ExecutorchCppSignature],
76
+ f: NativeFunction,
77
+ backend_indices: List[BackendIndex],
78
+ ) -> str:
79
+ """
80
+ For a given `NativeFunction`, find out the corresponding native function and dispatch to it. If zero or more than one
81
+ native function exists, error out. A simplified version of register_dispatch_key.py
82
+ Arguments:
83
+ sig: A CppSignature for this native function we want to use.
84
+ f: NativeFunction to generate static dispatch.
85
+ backend_indices: All available backends.
86
+ Return:
87
+ C++ code to call backend-specific functions, e.g., "return at::native::add(self, other, scale);"
88
+ """
89
+ if len(backend_indices) == 0 or f.manual_kernel_registration:
90
+ return ""
91
+
92
+ backends = [b for b in backend_indices if b.has_kernel(f)]
93
+ static_block = None
94
+ if len(backends) == 1:
95
+ backend_metadata = backends[0].get_kernel(f)
96
+ if backend_metadata:
97
+ args = ", ".join(a.name for a in sig.arguments())
98
+ # Here we are assuming there's no difference between CppSignature and NativeSignature for Executorch.
99
+ static_block = f"return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});"
100
+ else:
101
+ static_block = f"""
102
+ ET_ASSERT_UNREACHABLE_MSG("The number of native function(s) binding to {f.func.name} is {len(backends)}.");
103
+ """
104
+ return f"""
105
+ // {f.namespace}::{f.func}
106
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
107
+ {static_block}
108
+ }}
109
+ """
110
+
111
+
112
+ # Generates Functions.h, which provides the functional public C++ API,
113
+ # and the scaffolding to call into the dispatcher from these functions.
114
+ @dataclass(frozen=True)
115
+ class ComputeFunction:
116
+ static_dispatch_backend_indices: List[BackendIndex]
117
+
118
+ selector: SelectiveBuilder
119
+
120
+ use_aten_lib: bool
121
+
122
+ is_custom_op: Callable[[NativeFunction], bool]
123
+
124
+ @method_with_native_function
125
+ def __call__(self, f: NativeFunction) -> Optional[str]:
126
+ if not self.selector.is_root_operator(f"{f.namespace}::{f.func.name}"):
127
+ return None
128
+ if Variant.function not in f.variants:
129
+ return None
130
+ sig: Union[CppSignature, ExecutorchCppSignature] = (
131
+ CppSignatureGroup.from_native_function(
132
+ f, method=False, fallback_binding=f.manual_cpp_binding
133
+ ).most_faithful_signature()
134
+ if self.use_aten_lib
135
+ else ExecutorchCppSignature.from_native_function(f)
136
+ )
137
+ if self.use_aten_lib and not self.is_custom_op(f):
138
+ comma = ", "
139
+
140
+ return f"""
141
+ // {f.namespace}::{f.func}
142
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
143
+ return at::{sig.name()}({comma.join(e.name for e in sig.arguments())});
144
+ }}
145
+ """
146
+
147
+ else:
148
+ return static_dispatch(
149
+ sig,
150
+ f,
151
+ backend_indices=self.static_dispatch_backend_indices,
152
+ )
153
+
154
+
155
+ # Generates RegisterCodegenUnboxedKernels.cpp.
156
+ @dataclass(frozen=True)
157
+ class ComputeCodegenUnboxedKernels:
158
+ selector: SelectiveBuilder
159
+
160
+ use_aten_lib: bool
161
+
162
+ @method_with_nested_native_function
163
+ def __call__(
164
+ self,
165
+ unbox_kernel_entry: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]],
166
+ ) -> str:
167
+ f: NativeFunction = unbox_kernel_entry[0]
168
+ kernel_key: Union[ETKernelKey, List[ETKernelKey]] = unbox_kernel_entry[1][0]
169
+ kernel_meta: BackendMetadata = unbox_kernel_entry[1][1]
170
+
171
+ op_name = f"{f.namespace}::{f.func.name}"
172
+ if not self.selector.is_root_operator(op_name):
173
+ return ""
174
+
175
+ if not isinstance(kernel_key, list):
176
+ kernel_key = [kernel_key]
177
+ used_kernel_keys = self.selector.et_get_selected_kernels(
178
+ op_name, [k.to_native_string() for k in kernel_key]
179
+ )
180
+ if not used_kernel_keys:
181
+ return ""
182
+ sig: Union[CppSignature, ExecutorchCppSignature]
183
+ argument_type_gen: Callable[..., NamedCType]
184
+ return_type_gen: Callable[..., CType]
185
+ if self.use_aten_lib:
186
+ sig = CppSignatureGroup.from_native_function(
187
+ f, method=False, fallback_binding=f.manual_cpp_binding
188
+ ).most_faithful_signature()
189
+ argument_type_gen = aten_cpp.argumenttype_type
190
+ return_type_gen = aten_cpp.returns_type
191
+ arguments = sig.arguments()
192
+ kernel_call = f"torch::executor::{f.namespace}::{sig.name()}"
193
+ else:
194
+ sig = ExecutorchCppSignature.from_native_function(f)
195
+ argument_type_gen = et_cpp.argumenttype_type
196
+ return_type_gen = et_cpp.returns_type
197
+ arguments = sig.arguments(include_context=False)
198
+ kernel_call = f"{kernel_meta.cpp_namespace}::{kernel_meta.kernel}"
199
+ # parse arguments into C++ code
200
+ binding_list, code_list = Unboxing(
201
+ argument_type_gen=argument_type_gen
202
+ ).convert_arguments(arguments)
203
+
204
+ # for each C++ argument, generate the conversion code
205
+ code_connector = "\n\t"
206
+ arg_connector = ", "
207
+
208
+ args_str = f"{arg_connector.join(e.name for e in binding_list)}"
209
+ event_tracer_output_logging = ""
210
+ output_ids = []
211
+
212
+ if len(f.func.returns) == 0:
213
+ if len(f.func.arguments.out) == 0:
214
+ raise Exception(
215
+ f"Can't handle native function {f.func} with no returns and no out yet."
216
+ )
217
+ out = f.func.arguments.out[0]
218
+ return_assignment = f"""stack[{len(binding_list)}] = &{out.name};"""
219
+ ret_prefix = ""
220
+ output_ids = [len(binding_list)]
221
+ else:
222
+ if len(f.func.arguments.out) == 0:
223
+ return_assignment = (
224
+ f"""*stack[{len(binding_list)}] = EValue(result_);"""
225
+ )
226
+ ret_prefix = return_type_gen(f.func.returns).cpp_type() + " result_ = "
227
+ output_ids = [len(binding_list)]
228
+ else:
229
+ return_assignment = ""
230
+ ret_prefix = ""
231
+ output_ids = [
232
+ len(binding_list) - (i + 1)
233
+ for i in reversed(range(len(f.func.arguments.out)))
234
+ ]
235
+
236
+ for output_id in output_ids:
237
+ event_tracer_output_logging += (
238
+ f"internal::event_tracer_log_evalue("
239
+ f"context.internal_event_tracer(), "
240
+ f"*stack[{output_id}]);\n"
241
+ )
242
+
243
+ newline = "\n "
244
+ return "\n".join(
245
+ [
246
+ f"""
247
+ Kernel(
248
+ "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != 'default' else ''}
249
+ []({contextArg.defn()}, EValue** stack) {{
250
+ {code_connector.join(code_list)}
251
+
252
+ internal::EventTracerProfileScope event_tracer_scope(context.internal_event_tracer(), "native_call_{f.func.name}");
253
+ EXECUTORCH_SCOPE_PROF("native_call_{f.func.name}");
254
+ {ret_prefix}{kernel_call}(context, {args_str});
255
+ {event_tracer_output_logging}
256
+ {return_assignment}
257
+ }}
258
+ ),
259
+ """
260
+ for k in used_kernel_keys
261
+ ]
262
+ )
263
+
264
+
265
+ def gen_unboxing(
266
+ *,
267
+ native_functions: Sequence[NativeFunction],
268
+ cpu_fm: FileManager,
269
+ selector: SelectiveBuilder,
270
+ use_aten_lib: bool,
271
+ kernel_index: ETKernelIndex,
272
+ manual_registration: bool,
273
+ ) -> None:
274
+ # Iterable type for write_sharded is a Tuple of (native_function, (kernel_key, metadata))
275
+ def key_func(
276
+ item: Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]
277
+ ) -> str:
278
+ return item[0].root_name + ":" + item[1][0].to_native_string()
279
+
280
+ items: List[Tuple[NativeFunction, Tuple[ETKernelKey, BackendMetadata]]] = [
281
+ (native_function, (kernel_key, metadata))
282
+ for native_function in native_functions
283
+ for kernel_key, metadata in kernel_index.get_kernels(native_function).items()
284
+ ]
285
+
286
+ header = ["Functions.h" if use_aten_lib else "NativeFunctions.h"]
287
+ filename = (
288
+ "RegisterKernels.cpp"
289
+ if manual_registration
290
+ else "RegisterCodegenUnboxedKernels.cpp"
291
+ )
292
+ cpu_fm.write_sharded(
293
+ filename,
294
+ items,
295
+ key_fn=key_func,
296
+ env_callable=lambda unbox_kernel_entry: {
297
+ "unboxed_kernels": [
298
+ ComputeCodegenUnboxedKernels(selector, use_aten_lib)(unbox_kernel_entry)
299
+ ],
300
+ "fn_header": header
301
+ if unbox_kernel_entry == items[0]
302
+ else [], # Only write header once
303
+ },
304
+ num_shards=1,
305
+ sharded_keys={"unboxed_kernels", "fn_header"},
306
+ )
307
+
308
+
309
+ @with_native_function_and_index # type: ignore[arg-type]
310
+ def compute_native_function_declaration(
311
+ g: Union[NativeFunctionsGroup, NativeFunction], kernel_index: ETKernelIndex
312
+ ) -> List[str]:
313
+ assert isinstance(g, NativeFunction)
314
+ sig = ExecutorchCppSignature.from_native_function(f=g)
315
+ metadata_list = kernel_index.get_kernels(g).values()
316
+ if metadata_list is None:
317
+ return []
318
+ prefix = "TORCH_API"
319
+
320
+ # for kernels in lean mode, we declare two versions, one with context and one without.
321
+ # In the end we will clean up the unused one.
322
+ def gen_decl(metadata: BackendMetadata, include_context: bool) -> str:
323
+ return f"{prefix} {sig.decl(name=metadata.kernel, include_context=include_context)};"
324
+
325
+ return [
326
+ gen_decl(metadata, include_context)
327
+ for include_context in [False, True]
328
+ for metadata in metadata_list
329
+ ]
330
+
331
+
332
+ def gen_functions_declarations(
333
+ *,
334
+ native_functions: Sequence[NativeFunction],
335
+ kernel_index: ETKernelIndex,
336
+ selector: SelectiveBuilder,
337
+ use_aten_lib: bool,
338
+ custom_ops_native_functions: Optional[Sequence[NativeFunction]] = None,
339
+ ) -> str:
340
+ """
341
+ Generates namespace separated C++ function API inline declaration/definitions.
342
+ Native functions are grouped by namespaces and the generated code is wrapped inside
343
+ namespace blocks.
344
+
345
+ E.g., for `custom_1::foo.out` in yaml file we will generate a C++ API as a symbol
346
+ in `torch::executor::custom_1::foo_out`. This way we avoid symbol conflict when
347
+ the other `custom_2::foo.out` is available.
348
+ """
349
+
350
+ # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
351
+ # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
352
+
353
+ dispatch_key = DispatchKey.CPU
354
+ backend_index = kernel_index._to_backend_index()
355
+
356
+ ns_grouped_functions = defaultdict(list)
357
+ for native_function in native_functions:
358
+ ns_grouped_functions[native_function.namespace].append(native_function)
359
+ functions_declarations = ""
360
+ newline = "\n"
361
+ for namespace in ns_grouped_functions:
362
+ ns_helper = NamespaceHelper(
363
+ namespace_str=namespace,
364
+ entity_name="",
365
+ max_level=3,
366
+ )
367
+ declarations = list(
368
+ mapMaybe(
369
+ ComputeFunction(
370
+ static_dispatch_backend_indices=[backend_index],
371
+ selector=selector,
372
+ use_aten_lib=use_aten_lib,
373
+ is_custom_op=lambda f: custom_ops_native_functions is not None
374
+ and f in custom_ops_native_functions,
375
+ ),
376
+ ns_grouped_functions[namespace],
377
+ )
378
+ )
379
+ functions_declarations += f"""
380
+ {ns_helper.prologue}
381
+ {newline.join(declarations)}
382
+ {ns_helper.epilogue}
383
+ """
384
+ return functions_declarations
385
+
386
+
387
+ def get_ns_grouped_kernels(
388
+ *,
389
+ native_functions: Sequence[NativeFunction],
390
+ kernel_index: ETKernelIndex,
391
+ native_function_decl_gen: Callable[
392
+ [
393
+ Union[NativeFunctionsGroup, NativeFunction],
394
+ ETKernelIndex,
395
+ ],
396
+ List[str],
397
+ ],
398
+ ) -> Dict[str, List[str]]:
399
+ ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
400
+ for f in native_functions:
401
+ native_function_namespaces = set()
402
+ op_kernels = kernel_index.get_kernels(f)
403
+ for backend_metadata in op_kernels.values():
404
+ if backend_metadata:
405
+ namespace = backend_metadata.cpp_namespace
406
+ native_function_namespaces.add(namespace)
407
+ else:
408
+ namespace = DEFAULT_KERNEL_NAMESPACE
409
+ assert (
410
+ len(native_function_namespaces) <= 1
411
+ ), f"Codegen only supports one namespace per operator, got {native_function_namespaces}"
412
+ ns_grouped_kernels[namespace].extend(
413
+ native_function_decl_gen(f, kernel_index)
414
+ )
415
+ return ns_grouped_kernels
416
+
417
+
418
+ def gen_headers(
419
+ *,
420
+ native_functions: Sequence[NativeFunction],
421
+ gen_custom_ops_header: bool,
422
+ custom_ops_native_functions: Sequence[NativeFunction],
423
+ selector: SelectiveBuilder,
424
+ kernel_index: ETKernelIndex,
425
+ cpu_fm: FileManager,
426
+ use_aten_lib: bool,
427
+ ) -> None:
428
+ """Generate headers.
429
+
430
+ Args:
431
+ native_functions (Sequence[NativeFunction]): a collection of NativeFunction for ATen ops.
432
+ gen_custom_ops_header (bool): whether we should generate CustomOpsNativeFunctions.h
433
+ custom_ops_native_functions (Sequence[NativeFunction]): a collection of NativeFunction for custom ops.
434
+ kernel_index (ETKernelIndex): kernel collection
435
+ cpu_fm (FileManager): file manager that manages the output streams
436
+ use_aten_lib (bool): whether we are generating for PyTorch types or Executorch types.
437
+ """
438
+ aten_headers = ["#include <ATen/Functions.h>"]
439
+ backend_indices = {DispatchKey.CPU: kernel_index._to_backend_index()}
440
+ if gen_custom_ops_header:
441
+ cpu_fm.write_with_template(
442
+ "CustomOpsNativeFunctions.h",
443
+ "NativeFunctions.h",
444
+ lambda: {
445
+ "nativeFunctions_declarations": get_native_function_declarations(
446
+ grouped_native_functions=custom_ops_native_functions,
447
+ backend_indices=backend_indices,
448
+ native_function_decl_gen=dest.compute_native_function_declaration,
449
+ ),
450
+ "headers": [
451
+ "#include <ATen/ATen.h>",
452
+ "#include <torch/torch.h>",
453
+ ],
454
+ },
455
+ )
456
+ aten_headers.append('#include "CustomOpsNativeFunctions.h"')
457
+ cpu_fm.write(
458
+ "Functions.h",
459
+ lambda: {
460
+ "static_dispatch_extra_headers": aten_headers
461
+ if use_aten_lib
462
+ else ['#include "NativeFunctions.h"'],
463
+ "Functions_declarations": gen_functions_declarations(
464
+ native_functions=native_functions,
465
+ kernel_index=kernel_index,
466
+ selector=selector,
467
+ use_aten_lib=use_aten_lib,
468
+ custom_ops_native_functions=custom_ops_native_functions,
469
+ ),
470
+ },
471
+ )
472
+ cpu_fm.write(
473
+ "RegisterKernels.h",
474
+ lambda: {
475
+ "generated_comment": "@" + "generated by torchgen/gen_executorch.py",
476
+ },
477
+ )
478
+ headers = {
479
+ "headers": [
480
+ "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
481
+ "#include <executorch/codegen/macros.h> // TORCH_API",
482
+ "#include <executorch/runtime/kernel/kernel_runtime_context.h>",
483
+ ],
484
+ }
485
+ if use_aten_lib:
486
+ cpu_fm.write(
487
+ "NativeFunctions.h",
488
+ lambda: dict(
489
+ {
490
+ "nativeFunctions_declarations": get_native_function_declarations(
491
+ grouped_native_functions=native_functions,
492
+ backend_indices=backend_indices,
493
+ native_function_decl_gen=dest.compute_native_function_declaration,
494
+ ),
495
+ },
496
+ **headers,
497
+ ),
498
+ )
499
+ else:
500
+ ns_grouped_kernels = get_ns_grouped_kernels(
501
+ native_functions=native_functions,
502
+ kernel_index=kernel_index,
503
+ native_function_decl_gen=compute_native_function_declaration, # type: ignore[arg-type]
504
+ )
505
+ cpu_fm.write(
506
+ "NativeFunctions.h",
507
+ lambda: dict(
508
+ {
509
+ "nativeFunctions_declarations": get_native_function_declarations_from_ns_grouped_kernels(
510
+ ns_grouped_kernels=ns_grouped_kernels,
511
+ ),
512
+ },
513
+ **headers,
514
+ ),
515
+ )
516
+
517
+
518
+ def gen_custom_ops(
519
+ *,
520
+ native_functions: Sequence[NativeFunction],
521
+ selector: SelectiveBuilder,
522
+ kernel_index: ETKernelIndex,
523
+ cpu_fm: FileManager,
524
+ rocm: bool,
525
+ ) -> None:
526
+ dispatch_key = DispatchKey.CPU
527
+ (
528
+ anonymous_definition,
529
+ static_init_dispatch_registrations,
530
+ ) = gen_custom_ops_registration(
531
+ native_functions=native_functions,
532
+ selector=selector,
533
+ kernel_index=kernel_index,
534
+ rocm=rocm,
535
+ )
536
+ cpu_fm.write_with_template(
537
+ f"Register{dispatch_key}CustomOps.cpp",
538
+ "RegisterDispatchKeyCustomOps.cpp",
539
+ lambda: {
540
+ "ops_headers": '#include "CustomOpsNativeFunctions.h"',
541
+ "DispatchKey": dispatch_key,
542
+ "dispatch_namespace": dispatch_key.lower(),
543
+ "dispatch_namespaced_definitions": "",
544
+ "dispatch_anonymous_definitions": anonymous_definition,
545
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
546
+ },
547
+ )
548
+ cpu_fm.write_with_template(
549
+ f"Register{dispatch_key}Stub.cpp",
550
+ "RegisterDispatchKeyCustomOps.cpp",
551
+ lambda: {
552
+ "ops_headers": "",
553
+ "DispatchKey": dispatch_key,
554
+ "dispatch_namespace": dispatch_key.lower(),
555
+ "dispatch_namespaced_definitions": "",
556
+ "dispatch_anonymous_definitions": list(
557
+ mapMaybe(ComputeNativeFunctionStub(), native_functions)
558
+ ),
559
+ "static_init_dispatch_registrations": static_init_dispatch_registrations,
560
+ },
561
+ )
562
+
563
+ (
564
+ aten_schema_registrations,
565
+ schema_registrations,
566
+ ) = get_native_function_schema_registrations(
567
+ native_functions=native_functions,
568
+ schema_selector=selector,
569
+ )
570
+ cpu_fm.write(
571
+ "RegisterSchema.cpp",
572
+ lambda: {
573
+ "schema_registrations": schema_registrations,
574
+ "aten_schema_registrations": aten_schema_registrations,
575
+ },
576
+ )
577
+
578
+
579
+ def translate_native_yaml(
580
+ tags_yaml_path: str,
581
+ aten_yaml_path: str,
582
+ native_yaml_path: Optional[str],
583
+ use_aten_lib: bool,
584
+ out_file: TextIO,
585
+ ) -> None:
586
+ """Translates Executorch DSL dialect to use the same syntax as
587
+ native_functions.yaml. The major difference is that Executorch DSL dialect
588
+ supports "op" key, where it refers to the operator name in native_functions.yaml.
589
+
590
+ For example, a functions.yaml may have the following entry:
591
+
592
+ - op: add.out
593
+ ...
594
+
595
+ It needs to be translated to the following:
596
+
597
+ - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
598
+ ...
599
+
600
+ We go in aten_yaml_path and find the operator schema for "add.out" and add it
601
+ to the original functions.yaml. We also add required field "variants", where for
602
+ Executorch it will always be "function".
603
+
604
+ For ATen mode we don't have to do the translation because native_yaml_path is
605
+ the same as native_functions.yaml.
606
+
607
+ Args:
608
+ tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
609
+ It is not optional.
610
+ aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
611
+ native_yaml_path: Path to a functions.yaml file to parse.
612
+ If the path does not exist in the filesystem, it is treated as an
613
+ empty file. If `custom_ops_yaml_path` exists, the contents of that
614
+ file are appended to the yaml input to be parsed.
615
+ use_aten_lib: We use this flag to determine if we want to generate native
616
+ functions. In ATen mode we should generate out= variants.
617
+ out_file: The IO object that we are writing into.
618
+ Returns:
619
+ None
620
+ """
621
+ if use_aten_lib:
622
+ with open(aten_yaml_path) as aten_yaml:
623
+ out_file.writelines(aten_yaml.readlines())
624
+ return
625
+
626
+ native_functions, persisted_fields = parse_et_yaml(
627
+ aten_yaml_path,
628
+ tags_yaml_path,
629
+ None,
630
+ skip_native_fns_gen=False,
631
+ )
632
+
633
+ func_to_scoped_name: Dict[FunctionSchema, str] = {
634
+ f.func: f"{f.namespace}::{f.func.name}" for f in native_functions
635
+ }
636
+ op_to_scoped_name: Dict[OperatorName, str] = {
637
+ func.name: name for func, name in func_to_scoped_name.items()
638
+ }
639
+
640
+ schema_dict = {name: str(func) for func, name in func_to_scoped_name.items()}
641
+ kernel_persist_dict: Dict[str, Dict[str, Any]] = {
642
+ op_to_scoped_name[op]: v for op, v in persisted_fields.items()
643
+ }
644
+
645
+ if (
646
+ not native_yaml_path
647
+ or not os.path.exists(native_yaml_path)
648
+ or os.stat(native_yaml_path).st_size == 0
649
+ ):
650
+ return
651
+ with open(native_yaml_path) as native_yaml:
652
+ native_es = yaml.load(native_yaml, Loader=LineLoader)
653
+ if not native_es:
654
+ return
655
+ for e in native_es:
656
+ assert isinstance(e.get("__line__"), int), e
657
+ loc = Location(native_yaml_path, e.pop("__line__"))
658
+ with context(lambda: f"in {loc}:\n "):
659
+ if "variants" not in e:
660
+ e["variants"] = "function"
661
+ if "func" in e:
662
+ continue
663
+ assert isinstance(e.get("op"), str), e
664
+ opname = e.pop("op")
665
+ if "::" not in opname:
666
+ opname = "aten::" + opname
667
+ assert opname in schema_dict
668
+ e["func"] = schema_dict.get(opname)
669
+
670
+ # Write out persisted kernel information
671
+ if opname in kernel_persist_dict:
672
+ for k, v in kernel_persist_dict[opname].items():
673
+ e[k] = v
674
+
675
+ yaml.dump(native_es, out_file, width=1000)
676
+
677
+
678
+ def parse_yaml(
679
+ path: Optional[str],
680
+ tags_yaml_path: str,
681
+ function_filter: Callable[[NativeFunction], bool],
682
+ skip_native_fns_gen: bool = False,
683
+ ) -> Tuple[
684
+ List[NativeFunction],
685
+ Union[Dict[DispatchKey, Dict[OperatorName, BackendMetadata]], ETKernelIndex],
686
+ ]:
687
+ if path and os.path.exists(path) and os.stat(path).st_size > 0:
688
+ with open(path) as f:
689
+ es = yaml.load(f, Loader=LineLoader)
690
+
691
+ # Check for kernel index structure
692
+ kernel_index = (
693
+ parse_et_yaml_struct(es) if any("kernels" in e for e in es) else None
694
+ )
695
+
696
+ # Remove ET specific fields from entries for BC compatibility
697
+ for entry in es:
698
+ for field in ET_FIELDS:
699
+ entry.pop(field, None)
700
+
701
+ parsed_yaml = parse_native_yaml(
702
+ path,
703
+ tags_yaml_path,
704
+ None,
705
+ skip_native_fns_gen=skip_native_fns_gen,
706
+ loaded_yaml=es,
707
+ )
708
+ native_functions = list(filter(function_filter, parsed_yaml.native_functions))
709
+ op_names = [f.func.name for f in native_functions]
710
+
711
+ # (1) Return ETKernelIndex if kernel index is present
712
+ if kernel_index is not None:
713
+ filtered_index = {
714
+ op_name: kernel_mapping
715
+ for op_name, kernel_mapping in kernel_index.index.items()
716
+ if op_name in op_names
717
+ }
718
+ return native_functions, ETKernelIndex(index=filtered_index)
719
+
720
+ # (2) Return BackendIndices if kernel index is absent
721
+ def map_index(
722
+ m: Dict[OperatorName, BackendMetadata]
723
+ ) -> Dict[OperatorName, BackendMetadata]:
724
+ return {op: m[op] for op in m if op in op_names}
725
+
726
+ backend_indices = {
727
+ k: map_index(b.index) for (k, b) in parsed_yaml.backend_indices.items()
728
+ }
729
+
730
+ return native_functions, backend_indices
731
+ else:
732
+ return [], {}
733
+
734
+
735
+ def parse_yaml_files(
736
+ tags_yaml_path: str,
737
+ aten_yaml_path: str,
738
+ native_yaml_path: Optional[str],
739
+ custom_ops_yaml_path: Optional[str],
740
+ selector: SelectiveBuilder,
741
+ use_aten_lib: bool,
742
+ ) -> Tuple[ETParsedYaml, Optional[ETParsedYaml]]:
743
+ """Parses functions.yaml and custom_ops.yaml files.
744
+
745
+ Args:
746
+ tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
747
+ It is not optional.
748
+ aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
749
+ native_yaml_path: Path to a functions.yaml file to parse.
750
+ If the path does not exist in the filesystem, it is treated as an
751
+ empty file. If `custom_ops_yaml_path` exists, the contents of that
752
+ file are appended to the yaml input to be parsed.
753
+ custom_ops_yaml_path: Path to a custom_ops.yaml file to parse. If
754
+ the path does not exist in the filesystem, it is ignored.
755
+ selector: For selective build.
756
+ use_aten_lib: We use this flag to determine if we want to generate native
757
+ functions. In ATen mode we should generate out= variants.
758
+ Returns:
759
+ A tuple with two elements:
760
+ [0]: The parsed results of concatenating the contents of
761
+ `native_yaml_path` and `custom_ops_yaml_path`.
762
+ [1]: The parsed results of the contents of `custom_ops_yaml_path`, if
763
+ present. If not present, None.
764
+ """
765
+ import tempfile
766
+
767
+ # only include selected ops, this is because we want to avoid
768
+ def function_filter(f: NativeFunction) -> bool:
769
+ return selector.is_native_function_selected(f)
770
+
771
+ with tempfile.TemporaryDirectory() as tmpdirname:
772
+ translated_yaml_path = os.path.join(tmpdirname, "translated.yaml")
773
+ with open(translated_yaml_path, "w") as translated:
774
+ translate_native_yaml(
775
+ tags_yaml_path,
776
+ aten_yaml_path,
777
+ native_yaml_path,
778
+ use_aten_lib,
779
+ translated,
780
+ )
781
+
782
+ translated_functions, translated_indices = parse_yaml(
783
+ translated_yaml_path, tags_yaml_path, function_filter, not use_aten_lib
784
+ )
785
+ custom_ops_functions, custom_ops_indices = parse_yaml(
786
+ custom_ops_yaml_path, tags_yaml_path, function_filter, True
787
+ )
788
+
789
+ # Convert BackendIndices to ETKernelIndex
790
+ if not isinstance(translated_indices, ETKernelIndex):
791
+ translated_indices = ETKernelIndex.from_backend_indices(translated_indices)
792
+ if not isinstance(custom_ops_indices, ETKernelIndex):
793
+ custom_ops_indices = ETKernelIndex.from_backend_indices(custom_ops_indices)
794
+
795
+ combined_functions = translated_functions + custom_ops_functions
796
+ combined_kernel_index = ETKernelIndex.merge_indices(
797
+ translated_indices, custom_ops_indices
798
+ )
799
+ combined_yaml = ETParsedYaml(combined_functions, combined_kernel_index)
800
+ custom_ops_parsed_yaml = ETParsedYaml(custom_ops_functions, custom_ops_indices)
801
+
802
+ return combined_yaml, custom_ops_parsed_yaml
803
+
804
+
805
+ def main() -> None:
806
+ parser = argparse.ArgumentParser(description="Generate operator source files")
807
+ # Although we don't refer to --source-path directly, make_file_manager()
808
+ # expects it to point to a directory that contains a templates/ subdirectory
809
+ # containing the file templates.
810
+ parser.add_argument(
811
+ "-s",
812
+ "--source-path",
813
+ help="path to source directory for kernel templates",
814
+ )
815
+ parser.add_argument(
816
+ "--functions-yaml-path",
817
+ "--functions_yaml_path",
818
+ help="path to the functions.yaml file to use. Optional, but at least "
819
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
820
+ "specified.",
821
+ )
822
+ parser.add_argument(
823
+ "--custom-ops-yaml-path",
824
+ "--custom_ops_yaml_path",
825
+ help="path to the custom_ops.yaml file to use. Optional, but at least "
826
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
827
+ "specified.",
828
+ )
829
+ parser.add_argument(
830
+ "--aten-yaml-path",
831
+ "--aten_yaml_path",
832
+ help="path to native_functions.yaml file.",
833
+ )
834
+ # Note that make_file_manager() also looks at --install-dir.
835
+ parser.add_argument(
836
+ "-d",
837
+ "--install-dir",
838
+ "--install_dir",
839
+ help="output directory",
840
+ default="build/generated",
841
+ )
842
+ parser.add_argument(
843
+ "-o",
844
+ "--output-dependencies",
845
+ help="output a list of dependencies into the given file and exit",
846
+ )
847
+ # Although we don't refer to --dry-run directly, make_file_manager() looks
848
+ # for it.
849
+ parser.add_argument(
850
+ "--dry-run",
851
+ action="store_true",
852
+ help="run without writing any files (still updates outputs)",
853
+ )
854
+ parser.add_argument(
855
+ "--static-dispatch-backend",
856
+ "--static_dispatch_backend",
857
+ nargs="*",
858
+ help="generate static dispatch code for the specific backend (if set)",
859
+ )
860
+ parser.add_argument(
861
+ "--op-registration-whitelist",
862
+ "--op_registration_whitelist",
863
+ nargs="*",
864
+ help="filter op registrations by the whitelist (if set); "
865
+ "each item is `namespace`::`operator name` without overload name; "
866
+ "e.g.: aten::empty aten::conv2d ...",
867
+ )
868
+ parser.add_argument(
869
+ "--op-selection-yaml-path",
870
+ "--op_selection_yaml_path",
871
+ help="Provide a path to the operator selection (for custom build) YAML "
872
+ "that contains the information about the set of selected operators "
873
+ "and their categories (training, ...). Each operator is either a "
874
+ "full operator name with overload or just a bare operator name. "
875
+ "The operator names also contain the namespace prefix (e.g. aten::)",
876
+ )
877
+ parser.add_argument(
878
+ "--tags-path",
879
+ help="Path to tags.yaml. Required by yaml parsing in codegen system.",
880
+ )
881
+ parser.add_argument(
882
+ "--rocm",
883
+ action="store_true",
884
+ help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
885
+ )
886
+ parser.add_argument(
887
+ "--use-aten-lib",
888
+ "--use_aten_lib",
889
+ action="store_true",
890
+ help="a boolean flag to indicate whether we use ATen kernels or not, in the future this flag will be per "
891
+ "operator",
892
+ )
893
+ parser.add_argument(
894
+ "--manual_registration",
895
+ "--manual-registration",
896
+ action="store_true",
897
+ help="a boolean flag to indicate whether we want to manually call"
898
+ "register_kernels() or rely on static init. ",
899
+ )
900
+ parser.add_argument(
901
+ "--generate",
902
+ type=str,
903
+ nargs="*",
904
+ choices=["headers", "sources"],
905
+ default=["headers", "sources"],
906
+ help="Generate only a subset of files",
907
+ )
908
+ options = parser.parse_args()
909
+ assert options.tags_path, "tags.yaml is required by codegen yaml parsing."
910
+
911
+ selector = get_custom_build_selector(
912
+ options.op_registration_whitelist,
913
+ options.op_selection_yaml_path,
914
+ )
915
+
916
+ parsed_yaml, custom_ops_parsed_yaml = parse_yaml_files(
917
+ aten_yaml_path=options.aten_yaml_path,
918
+ tags_yaml_path=options.tags_path,
919
+ native_yaml_path=options.functions_yaml_path,
920
+ custom_ops_yaml_path=options.custom_ops_yaml_path,
921
+ selector=selector,
922
+ use_aten_lib=options.use_aten_lib,
923
+ )
924
+ native_functions, kernel_index = (
925
+ parsed_yaml.native_functions,
926
+ parsed_yaml.kernel_index,
927
+ )
928
+ custom_ops_native_functions = (
929
+ custom_ops_parsed_yaml.native_functions if custom_ops_parsed_yaml else []
930
+ )
931
+
932
+ cpu_fm = make_file_manager(options=options)
933
+
934
+ if "headers" in options.generate:
935
+ # generate CustomOpsNativeFunctions.h when custom_ops.yaml is present, to match the build system.
936
+ gen_headers(
937
+ native_functions=native_functions,
938
+ gen_custom_ops_header=options.custom_ops_yaml_path,
939
+ custom_ops_native_functions=custom_ops_native_functions,
940
+ selector=selector,
941
+ kernel_index=kernel_index,
942
+ cpu_fm=cpu_fm,
943
+ use_aten_lib=options.use_aten_lib,
944
+ )
945
+
946
+ if "sources" in options.generate:
947
+ gen_unboxing(
948
+ native_functions=native_functions,
949
+ cpu_fm=cpu_fm,
950
+ selector=selector,
951
+ use_aten_lib=options.use_aten_lib,
952
+ kernel_index=kernel_index,
953
+ manual_registration=options.manual_registration,
954
+ )
955
+ if custom_ops_native_functions:
956
+ gen_custom_ops(
957
+ native_functions=custom_ops_native_functions,
958
+ selector=selector,
959
+ kernel_index=kernel_index,
960
+ cpu_fm=cpu_fm,
961
+ rocm=options.rocm,
962
+ )
963
+
964
+ if options.output_dependencies:
965
+ depfile_path = pathlib.Path(options.output_dependencies).resolve()
966
+ depfile_name = depfile_path.name
967
+ depfile_stem = depfile_path.stem
968
+
969
+ for fm, prefix in [
970
+ (cpu_fm, ""),
971
+ ]:
972
+ varname = prefix + depfile_stem
973
+ path = depfile_path.parent / (prefix + depfile_name)
974
+ fm.write_outputs(varname, str(path))
975
+
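The depfile handling above leans on pathlib's name/stem split. A minimal standalone illustration (the path below is hypothetical, not one used by the generator):

import pathlib

depfile_path = pathlib.Path("/tmp/build/et_codegen.d").resolve()
print(depfile_path.name)  # "et_codegen.d" -> used for the output file name
print(depfile_path.stem)  # "et_codegen"   -> used as the dependency variable name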
976
+
977
+ if __name__ == "__main__":
978
+ main()
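A note on the paired flag spellings used throughout these parsers (e.g. "--op-selection-yaml-path" / "--op_selection_yaml_path"): argparse derives the attribute name from the first long option, so both spellings land on the same dest. A small self-contained check of that behavior (the flag name mirrors one above; the value is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--op-selection-yaml-path", "--op_selection_yaml_path")
args = parser.parse_args(["--op_selection_yaml_path", "selected_ops.yaml"])
# dest is inferred from the first option string, with dashes mapped to underscores
assert args.op_selection_yaml_path == "selected_ops.yaml"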
env-llmeval/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py ADDED
@@ -0,0 +1,791 @@
1
+ from dataclasses import dataclass
2
+ from typing import Callable, List, Optional, Tuple, Union
3
+
4
+ from torchgen.api import cpp, dispatcher
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import (
7
+ BaseCType,
8
+ Binding,
9
+ CType,
10
+ DispatcherSignature,
11
+ FunctionalizationLambda,
12
+ iTensorListRefT,
13
+ NativeSignature,
14
+ tensorListT,
15
+ tensorT,
16
+ VectorCType,
17
+ ViewInverseSignature,
18
+ )
19
+ from torchgen.context import (
20
+ method_with_native_function,
21
+ native_function_manager,
22
+ with_native_function,
23
+ with_native_function_and,
24
+ )
25
+ from torchgen.model import (
26
+ Argument,
27
+ BackendIndex,
28
+ BaseTy,
29
+ BaseType,
30
+ FunctionSchema,
31
+ ListType,
32
+ NativeFunction,
33
+ NativeFunctionsGroup,
34
+ NativeFunctionsViewGroup,
35
+ Return,
36
+ SchemaKind,
37
+ SelfArgument,
38
+ TensorOptionsArguments,
39
+ )
40
+ from torchgen.native_function_generation import (
41
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
42
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
43
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
44
+ )
45
+
46
+ from torchgen.selective_build.selector import SelectiveBuilder
47
+
48
+
49
+ # Note: [Mutable Ops Not Using Functionalization]
50
+ # Ops in this list currently do not work with functionalization and should be fixed.
51
+ MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION = (
52
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
53
+ + MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
54
+ + INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
55
+ + [
56
+ # It will be BC-breaking, but we should fix their schemas.
57
+ # should be inplace?
58
+ "record_stream",
59
+ # See Note [resize_ in Functionalization]
60
+ "resize_",
61
+ "resize_as_",
62
+ # This function is used for testing purposes only.
63
+ "_fill_mem_eff_dropout_mask_",
64
+ ]
65
+ )
66
+
67
+ # This file contains codegen that relates to the functionalization pass.
68
+ # It includes:
69
+ # - gen_functionalization_definition
70
+ # Generates dispatcher kernel definitions for the functionalization pass.
71
+ # - gen_functionalization_registration
72
+ # Generates dispatcher kernel registrations for the functionalization pass.
73
+ # - gen_functionalization_view_inverse_declaration
74
+ # Generates a declaration for an "inverse view", for every view op
75
+ # that is needed in functionalization. We manually implement their definitions.
76
+ # - gen_composite_view_copy_kernel
77
+ # Generates view_copy() composite kernels for all view_copy operators.
78
+
79
+
80
+ # Generates the body of the default composite C++ kernel for a {view}_copy NativeFunction
81
+ # See Note [view_copy NativeFunctions]
82
+ @dataclass(frozen=True)
83
+ class GenCompositeViewCopyKernel:
84
+ backend_index: BackendIndex
85
+
86
+ @method_with_native_function
87
+ def __call__(self, g: NativeFunctionsViewGroup) -> Optional[str]:
88
+ if g.view_copy is None:
89
+ return None
90
+
91
+ metadata = self.backend_index.get_kernel(g.view_copy)
92
+ assert metadata is not None
93
+
94
+ # We can make view_copy work in more cases by using reshape()
95
+ # when a normal view call would ordinarily fail.
96
+ # This also makes LTC backends more efficient, because they don't need to include
97
+ # clone() calls in their graph (which is normally needed by reshape).
98
+ if str(g.view_copy.func.name) == "view_copy":
99
+ assert metadata.kernel == "view_copy_symint"
100
+ return """\
101
+ at::Tensor view_copy_symint(const at::Tensor & self, at::SymIntArrayRef size) {
102
+ c10::SymDimVector shape = infer_size_dv(size, self.sym_numel());
103
+ if (!at::detail::computeStride(self.sym_sizes(), self.sym_strides(), shape).has_value()) {
104
+ return self.reshape_symint(size);
105
+ } else {
106
+ auto output = at::_ops::view::call(self, size);
107
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
108
+ }
109
+ }
110
+ """
111
+ # view_copy is a native signature, since we're generating an at::native:: kernel
112
+ # Functionalization always operates on symints though
113
+ view_copy_sig = NativeSignature(
114
+ g.view_copy.func, symint=metadata.supports_symint()
115
+ )
116
+
117
+ # view is a dispatcher signature, since we're calling into the at::_ops API
118
+ view_sig = DispatcherSignature(g.view.func)
119
+
120
+ view_api_name = g.view.func.name.unambiguous_name()
121
+ exprs = ", ".join(
122
+ [e.expr for e in translate(view_copy_sig.arguments(), view_sig.arguments())]
123
+ )
124
+
125
+ # view ops today always return either a Tensor or a list of Tensors
126
+ assert len(g.view.func.returns) == 1
127
+ assert g.view.func.returns[0].type == BaseType(
128
+ BaseTy.Tensor
129
+ ) or g.view.func.returns[0].type == ListType(BaseType(BaseTy.Tensor), None)
130
+
131
+ if g.view.func.returns[0].type == BaseType(BaseTy.Tensor):
132
+ return_cloned_output = """\
133
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);"""
134
+ else:
135
+ # If the return type is a list, we need to clone each tensor in the list.
136
+ return_cloned_output = f"""\
137
+ {view_copy_sig.returns_type().cpp_type()} out_clone;
138
+ for (const auto i : c10::irange(output.size())) {{
139
+ out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
140
+ }}
141
+ return out_clone;"""
142
+
143
+ # The default generated composite kernel for {view}_copy() operators just clones
144
+ # the input tensor, and runs the underlying view on the clone.
145
+ return f"""
146
+ {view_copy_sig.defn(name=metadata.kernel)} {{
147
+ auto output = at::_ops::{view_api_name}::call({exprs});
148
+ {return_cloned_output}
149
+ }}
150
+ """
151
+
152
+
153
+ def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
154
+ assert len(rets) == len(names)
155
+ if len(rets) == 0:
156
+ return ""
157
+ elif len(rets) == 1:
158
+ return f"return {names[0]};"
159
+ else:
160
+ return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
161
+
162
+
163
+ def modifies_arguments(f: NativeFunction) -> bool:
164
+ return any(
165
+ a.annotation is not None and a.annotation.is_write
166
+ for a in f.func.arguments.flat_all
167
+ )
168
+
169
+
170
+ def wrapper_name(func: FunctionSchema) -> str:
171
+ if func.name.overload_name:
172
+ return f"{cpp.name(func)}_{func.name.overload_name}"
173
+ else:
174
+ return cpp.name(func)
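wrapper_name() simply suffixes the overload name when one exists. The same rule on plain strings (the inputs here are hypothetical stand-ins for a FunctionSchema):

def sketch_wrapper_name(base_name: str, overload_name: str) -> str:
    # e.g. ("add", "Tensor") -> "add_Tensor"; ("relu", "") -> "relu"
    return f"{base_name}_{overload_name}" if overload_name else base_name

print(sketch_wrapper_name("add", "Tensor"), sketch_wrapper_name("relu", ""))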
175
+
176
+
177
+ def is_tensor_like(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> bool:
178
+ return isinstance(a, SelfArgument) or (
179
+ isinstance(a, Argument) and a.type.is_tensor_like()
180
+ )
181
+
182
+
183
+ # We need to wrap / unwrap various arguments from the op in the functionalization kernels.
184
+ # Some op schemas include non-owning types though (like TensorList),
185
+ # and when we unwrap them we expect to get out an owning type!
186
+ # We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
187
+ def get_owning_type(t: CType) -> Tuple[CType, Callable[[str], str]]:
188
+ if t == BaseCType(tensorListT):
189
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
190
+ if t == BaseCType(iTensorListRefT):
191
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{{{x}.begin(), {x}.end()}}"
192
+ # There are technically other non-owning types out there (like IntArrayRef),
193
+ # but functionalization only actually cares about the ones involving tensors.
194
+ return t, lambda x: x
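The owning-type mapping above can be pictured with plain strings: non-owning tensor-list types become std::vector<at::Tensor>, paired with the C++ expression that performs the conversion. A sketch using type names as strings rather than torchgen CTypes:

def sketch_get_owning_type(ctype: str):
    if ctype == "at::TensorList":
        return "::std::vector<at::Tensor>", lambda x: f"{x}.vec()"
    if ctype == "at::ITensorListRef":
        return "::std::vector<at::Tensor>", lambda x: f"{{{x}.begin(), {x}.end()}}"
    # everything else is passed through untouched
    return ctype, lambda x: x

owning, conv = sketch_get_owning_type("at::TensorList")
print(owning, "<-", conv("tensors"))  # ::std::vector<at::Tensor> <- tensors.vec()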
195
+
196
+
197
+ # unwraps all tensor-like arguments, returning:
198
+ # (1) a string containing all of the logic that does the unwrapping
199
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
200
+ def unwrap_tensor_args(
201
+ sig: DispatcherSignature, *, is_view_op: bool
202
+ ) -> Tuple[str, List[Binding]]:
203
+ context: List[Binding] = []
204
+ unwrapped_tensor_args: List[str] = []
205
+ for arg in sig.arguments():
206
+ if is_tensor_like(arg.argument):
207
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
208
+ unwrapped_name = f"{arg.name}_"
209
+ # For most ops, the functionalization needs to sync any pending updates on the input tensors
210
+ # before calling the operator, since otherwise the operator will act on stale data.
211
+ # For view ops though, we can continue to defer syncing until the tensor is used by
212
+ # a non-view operator.
213
+ maybe_sync_input = (
214
+ "" if is_view_op else f"at::functionalization::impl::sync({arg.name});"
215
+ )
216
+ unwrapped_type, conversion_fn = get_owning_type(
217
+ arg.nctype.remove_const_ref().type
218
+ )
219
+ unwrapped_tensor_args.append(
220
+ f"""
221
+ {unwrapped_type.cpp_type()} {unwrapped_name};
222
+ if (at::functionalization::impl::isFunctionalTensor({arg.name})) {{
223
+ {maybe_sync_input}
224
+ {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});
225
+ }} else {{
226
+ {unwrapped_name} = {conversion_fn(arg.name)};
227
+ }}"""
228
+ )
229
+ context.append(arg.with_name(unwrapped_name))
230
+ else:
231
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
232
+ context.append(arg)
233
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
234
+ return unwrap_tensor_args_str, context
235
+
236
+
237
+ # converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
238
+ # (1) a string containing all of the logic that does the conversions.
239
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
240
+ def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
241
+ context: List[Binding] = []
242
+ unwrapped_tensor_args: List[str] = []
243
+ for arg in sig.arguments():
244
+ if is_tensor_like(arg.argument):
245
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
246
+ a_ = arg.name
247
+ unwrapped_name = f"{arg.name}_meta"
248
+ unwrapped_tensor_args.append(f"auto {unwrapped_name} = to_meta({a_});")
249
+ context.append(arg.with_name(unwrapped_name))
250
+ else:
251
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
252
+ context.append(arg)
253
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
254
+ return unwrap_tensor_args_str, context
255
+
256
+
257
+ # The functionalization codegen currently expects view op schemas to have this form:
258
+ # foo(Tensor(a), ...) -> Tensor(a) (e.g. transpose)
259
+ # foo(Tensor(a!), ...) -> Tensor(a!) (e.g. transpose_)
260
+ def assert_view_op_properties(func: FunctionSchema) -> None:
261
+ def is_alias(a: Argument) -> bool:
262
+ return a.annotation is not None
263
+
264
+ args = func.arguments.flat_non_out
265
+ # The first argument is a tensor with an alias semantics (annotations)
266
+ assert len(args) > 0 and args[0].type == BaseType(
267
+ BaseTy.Tensor
268
+ ), f"""In the functionalization codegen, we expect the first argument of every view operator to be a tensor,
269
+ but found an argument of type {str(args[0].type)} for operator: {str(func.name)}."""
270
+ # No other arguments have aliasing semantics
271
+ assert is_alias(args[0]) and not any(
272
+ is_alias(a) for a in args[1:]
273
+ ), """In the functionalization codegen, we expect the first argument of every view operator to alias the output.
274
+ View operators with multiple aliasing inputs aren't supported yet. Found an operator that doesn't satisfy this constraint."""
275
+
276
+
277
+ # Generates the Functionalization kernel for:
278
+ # - ops that create aliases (e.g. transpose())
279
+ # - ops that are views AND mutations (e.g. transpose_())
280
+ def emit_view_functionalization_body(
281
+ g: NativeFunctionsViewGroup, *, view_inplace: bool
282
+ ) -> str:
283
+ if view_inplace:
284
+ # This op is both an inplace op AND a view op.
285
+ # See Note [Functionalization Pass - Inplace View Ops] for details.
286
+ # I currently have the view meta call into the out-of-place variant of the view, to avoid
287
+ # having to define an extra ~20 inplace {view}_inverse_ functions.
288
+ # Most view ops don't have NativeFunctionsGroups at all, because we don't define out= variants for view ops.
289
+ # I'm assuming that every inplace-view op has a corresponding out-of-place view op,
290
+ # with the same name but the trailing underscore removed.
291
+ # This is currently asserted at parse time in gen.py (see error_check_native_functions).
292
+ assert g.view_inplace is not None
293
+ f = g.view_inplace
294
+ else:
295
+ f = g.view
296
+
297
+ assert g.view_copy is not None
298
+ with native_function_manager(f):
299
+ call_sig = DispatcherSignature.from_schema(g.view_copy.func)
300
+
301
+ # the "view_copy" op name that the functionalization kernels need to call
302
+ api_name = g.view_copy.func.name.unambiguous_name()
303
+ # Sometimes the functionalization pass needs to no-op (e.g. if it was passed non-functional tensors)
304
+ # "no-op"ing in this context is just redispatching to the original op.
305
+ noop_api_name = f.func.name.unambiguous_name()
306
+
307
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
308
+ assert_view_op_properties(f.func)
309
+ view_tensor_name = dispatcher_sig.arguments()[0].name
310
+
311
+ return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
312
+
313
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
314
+ dispatcher_sig, is_view_op=True
315
+ )
316
+ view_redispatch_args = [
317
+ e.expr
318
+ for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)
319
+ ]
320
+
321
+ forward_lambda = FunctionalizationLambda.from_func(g, is_reverse=False)
322
+ reverse_lambda = FunctionalizationLambda.from_func(g, is_reverse=True)
323
+
324
+ # The meta API call should use the same arguments, but convert all tensors to meta tensors first.
325
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
326
+ meta_call_args = [
327
+ e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)
328
+ ]
329
+
330
+ if "inplace_view" in f.tags:
331
+ # See Note [Functionalization Pass - Inplace View Ops] for more details
332
+ return f"""
333
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
334
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
335
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
336
+ {unwrap_tensor_args_str}
337
+ at::AutoDispatchSkipFunctionalize guard;
338
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
339
+ }}
340
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
341
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
342
+ {forward_lambda.decl()} {{
343
+ if (reapply_views) {{
344
+ return {forward_lambda.inner_call(reapply_views=True)}
345
+ }} else {{
346
+ return {forward_lambda.inner_call(reapply_views=False)}
347
+ }}
348
+ }},
349
+ {reverse_lambda.decl()} {{
350
+ return {reverse_lambda.inner_call()}
351
+ }}
352
+ );
353
+ auto compute_reference_meta =
354
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
355
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
356
+ {return_type} reference_tensor_output;
357
+ if (compute_reference_meta) {{
358
+ {meta_conversion_str}
359
+ at::AutoDispatchSkipFunctionalize func_guard;
360
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
361
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
362
+ }}
363
+ // This function adds the above view meta to the current tensor and replays them off the base,
364
+ // mutating the size/stride info of the current FunctionalTensorWrapper.
365
+ // Because of this, we need to make sure to run the reference shape function above,
366
+ # BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
367
+ at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
368
+ // See Note [Propagating strides in the functionalization pass]
369
+ // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
370
+ // on a reference implementation here (instead of relying on the output from the forward lambda
371
+ // having the correct stride info)
372
+ if (compute_reference_meta) {{
373
+ at::functionalization::impl::set_sizes_strides_offset({view_tensor_name}, reference_tensor_output);
374
+ }}
375
+ return {view_tensor_name};
376
+ }}
377
+ """
378
+
379
+ else:
380
+ is_multi_output_view = isinstance(f.func.returns[0].type, ListType)
381
+ return f"""
382
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
383
+ {unwrap_tensor_args_str}
384
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
385
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
386
+ at::AutoDispatchSkipFunctionalize guard;
387
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
388
+ }}
389
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
390
+ auto compute_reference_meta =
391
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
392
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
393
+ {return_type} reference_tensor_output;
394
+ if (compute_reference_meta) {{
395
+ {meta_conversion_str}
396
+ at::AutoDispatchSkipFunctionalize func_guard;
397
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
398
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
399
+ }}
400
+ {return_type} tmp_output;
401
+ {{
402
+ at::AutoDispatchSkipFunctionalize guard;
403
+ if (reapply_views) {{
404
+ tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
405
+ }} else {{
406
+ tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
407
+ }}
408
+ }}
409
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
410
+ {forward_lambda.decl()} {{
411
+ if (reapply_views) {{
412
+ return {forward_lambda.inner_call(reapply_views=True)}
413
+ }} else {{
414
+ return {forward_lambda.inner_call(reapply_views=False)}
415
+ }}
416
+ }},
417
+ {reverse_lambda.decl()} {{
418
+ return {reverse_lambda.inner_call()}
419
+ }},
420
+ /*is_multi_output=*/{str(is_multi_output_view).lower()}
421
+ );
422
+ auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
423
+ // See Note [Propagating strides in the functionalization pass]
424
+ if (compute_reference_meta) {{
425
+ at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
426
+ }}
427
+ return out;
428
+ }}
429
+ """
430
+
431
+
432
+ def maybe_create_output(f: NativeFunction, var_name: str) -> str:
433
+ if len(f.func.returns) == 0:
434
+ return ""
435
+ return_type = dispatcher.returns_type(f.func.returns).remove_const_ref().cpp_type()
436
+ return f"{return_type} {var_name} = "
437
+
438
+
439
+ # Given a NativeFunction, and a variable name corresponding to the output of redispatching on the function,
440
+ # this returns two lists of names, consisting of:
441
+ # - the names of returns corresponding to the original (mutable) inputs of the outer function
442
+ # - the names of returns corresponding to the (immutable) outputs of the inner redispatched function
443
+ def get_mutable_redispatch_return_names(
444
+ f: NativeFunction, inner_return_var: str
445
+ ) -> Tuple[List[str], List[str]]:
446
+ aliased_returns = []
447
+ non_aliased_returns = []
448
+ for i, name in enumerate(f.func.aliased_return_names()):
449
+ if name is not None:
450
+ aliased_returns.append(name)
451
+ else:
452
+ non_aliased_returns.append(
453
+ inner_return_var
454
+ if len(f.func.returns) == 1
455
+ else f"std::get<{i}>({inner_return_var})"
456
+ )
457
+ return aliased_returns, non_aliased_returns
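The aliased/non-aliased split above can be exercised without torchgen by standing in plain strings for the aliased-return names (None marks a freshly created output; the names below are hypothetical):

from typing import List, Optional, Tuple

def sketch_split_returns(
    aliased_return_names: List[Optional[str]], inner_return_var: str
) -> Tuple[List[str], List[str]]:
    aliased, non_aliased = [], []
    num_returns = len(aliased_return_names)
    for i, name in enumerate(aliased_return_names):
        if name is not None:
            aliased.append(name)
        else:
            # a single return is used directly; multiple returns are unpacked via std::get<i>
            non_aliased.append(
                inner_return_var if num_returns == 1 else f"std::get<{i}>({inner_return_var})"
            )
    return aliased, non_aliased

print(sketch_split_returns(["self", None], "tmp_output"))
# (['self'], ['std::get<1>(tmp_output)'])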
458
+
459
+
460
+ # When functionalization "no-op's" and redispatches on a mutable operator, we need to take care so that:
461
+ # - For fresh outputs, we return the result of the redispatch (without wrapping outputs)
462
+ # - For outputs that were aliased to inputs, we return the inputs directly (since some of them might have been wrapped)
463
+ def return_from_mutable_noop_redispatch(
464
+ f: NativeFunction, inner_return_var: str
465
+ ) -> str:
466
+ aliased, non_aliased = get_mutable_redispatch_return_names(f, inner_return_var)
467
+ # Just get all of the return names, and immediately return them
468
+ return return_str(f.func.returns, aliased + non_aliased)
469
+
470
+
471
+ def wrap_propagate_mutations_and_return(
472
+ f: NativeFunction, functional_op: NativeFunction, inner_return_var: str
473
+ ) -> str:
474
+ mutable_arg_names = f.func.arguments.mutable_arg_names()
475
+ (
476
+ aliased_outer_rets,
477
+ non_aliased_outer_rets,
478
+ ) = get_mutable_redispatch_return_names(f, inner_return_var)
479
+ _, non_aliased_inner_rets = get_mutable_redispatch_return_names(
480
+ functional_op, inner_return_var
481
+ )
482
+ # The outer function may have a mix of aliased and non-aliased outputs,
483
+ # But the inner functional op that we're transforming to should only have non-aliased outputs
484
+ assert len(mutable_arg_names) + len(non_aliased_outer_rets) == len(
485
+ non_aliased_inner_rets
486
+ )
487
+
488
+ # First, take all of the newly created outputs from the inner call and wrap them into functional tensors
489
+ updates = []
490
+ non_aliased_wrapped_ret_names = []
491
+ for i, inner_ret in enumerate(
492
+ non_aliased_inner_rets[: len(non_aliased_outer_rets)]
493
+ ):
494
+ ret_name = f"output_{i}"
495
+ updates.append(
496
+ f"""\
497
+ auto output_{i} = at::functionalization::impl::to_functional_tensor({inner_ret});"""
498
+ )
499
+ non_aliased_wrapped_ret_names.append(ret_name)
500
+
501
+ # Next, take all of the mutated outputs from the inner call corresponding to mutated inputs,
502
+ # and propagate the mutations
503
+ for outer_arg, inner_ret in zip(
504
+ mutable_arg_names, non_aliased_inner_rets[len(non_aliased_outer_rets) :]
505
+ ):
506
+ updates.append(
507
+ f"""\
508
+ at::functionalization::impl::propagate_xla_data({outer_arg}, {inner_ret});
509
+ at::functionalization::impl::replace_({outer_arg}, {inner_ret});
510
+ at::functionalization::impl::commit_update({outer_arg});
511
+ at::functionalization::impl::sync({outer_arg});"""
512
+ )
513
+
514
+ # Finally, we return:
515
+ # - Any mutable arguments that also returns
516
+ # - Any immutable returns that were created wrapping the output from the inner call
517
+ returns_str = return_str(
518
+ f.func.returns, aliased_outer_rets + non_aliased_wrapped_ret_names
519
+ )
520
+ updates_str = "\n".join(updates)
521
+ return f"""\
522
+ {updates_str}
523
+ {returns_str}"""
524
+
525
+
526
+ # Generates the Functionalization kernel for:
527
+ # - mutation ops (inplace and out= ops)
528
+ @with_native_function_and
529
+ def emit_inplace_functionalization_body(
530
+ f: NativeFunction, g: NativeFunctionsGroup
531
+ ) -> str:
532
+ # mutation case
533
+ assert modifies_arguments(f)
534
+
535
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
536
+
537
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
538
+ dispatcher_sig, is_view_op=False
539
+ )
540
+
541
+ mutated_names = [
542
+ a.name
543
+ for a in f.func.arguments.flat_all
544
+ if a.type.is_tensor_like() and a.annotation is not None
545
+ ]
546
+ non_mutated_names = [
547
+ a.name
548
+ for a in f.func.arguments.flat_all
549
+ if a.type.is_tensor_like() and a.annotation is None
550
+ ]
551
+ non_mutated_tensor_names = [
552
+ a.name
553
+ for a in f.func.arguments.flat_all
554
+ if a.type == BaseType(BaseTy.Tensor) and a.annotation is None
555
+ ]
556
+ # all mutable inputs must be functional tensors in order to participate in functionalization
557
+ check_all_mutated_args_are_functional = " && ".join(
558
+ ["true"]
559
+ + [
560
+ f"at::functionalization::impl::isFunctionalTensor({a})"
561
+ for a in mutated_names
562
+ ]
563
+ )
564
+ check_any_non_mutated_args_are_functional = " || ".join(
565
+ ["false"]
566
+ + [
567
+ f"at::functionalization::impl::isFunctionalTensor({a})"
568
+ for a in non_mutated_names
569
+ ]
570
+ )
571
+
572
+ check_any_non_mutated_tensors_are_xla = " || ".join(
573
+ ["false"]
574
+ + [
575
+ f"{a}.device().type() == c10::DeviceType::XLA"
576
+ for a in non_mutated_tensor_names
577
+ ]
578
+ )
579
+ # These are used in the cases where we don't functionalize and redispatch to the inplace op
580
+ # case 1: we hit an inplace op that doesn't have an out-of-place equivalent
581
+ # case 2: we hit an inplace op but our inputs are not functional tensors (in which case our kernel just no-ops)
582
+ inplace_exprs = [
583
+ e.expr
584
+ for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)
585
+ ]
586
+
587
+ # call the out-of-place variant of the op
588
+ return_type = (
589
+ dispatcher.returns_type(g.functional.func.returns).remove_const_ref().cpp_type()
590
+ )
591
+ functional_sig = DispatcherSignature.from_schema(g.functional.func)
592
+ functional_exprs = [
593
+ e.expr
594
+ for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)
595
+ ]
596
+
597
+ if f.func.is_out_fn():
598
+ mutable_input_post_processing = "\n".join(
599
+ [
600
+ f"""
601
+ at::functionalization::impl::replace_(
602
+ {a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
603
+ at::functionalization::impl::commit_update({a.name});"""
604
+ for (i, a) in enumerate(f.func.arguments.out)
605
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
606
+ ]
607
+ )
608
+ else:
609
+ mutable_input_post_processing = "\n".join(
610
+ [
611
+ f"""
612
+ at::functionalization::impl::replace_({a.name}, tmp_output);
613
+ at::functionalization::impl::commit_update({a.name});"""
614
+ for a in f.func.arguments.flat_all
615
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
616
+ ]
617
+ )
618
+
619
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
620
+ # We don't want to run the inplace meta func for ops like .set_(), because:
621
+ # (1) they're unnecessary: inplace meta checks are only useful for ops like add_(),
622
+ # where broadcasting will work for the out-of-place case but should fail on the inplace call
623
+ # (2) They'll also fail without adding extra infra: we'd need to convert the input storage argument
624
+ # into a meta storage
625
+ any_storage_args = any(
626
+ a.type == BaseType(BaseTy.Storage) for a in f.func.arguments.flat_all
627
+ )
628
+
629
+ return f"""
630
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
631
+ if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
632
+ // Before converting the mutable op to its functional variant, run meta tensors through the original op.
633
+ // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
634
+ // (We can only do this for inplace ops today though, because they technically all support meta tensors).
635
+ {meta_conversion_str}
636
+ at::AutoDispatchSkipFunctionalize func_guard;
637
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
638
+ at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
639
+ }}
640
+ {unwrap_tensor_args_str}
641
+ if (!({check_all_mutated_args_are_functional})) {{
642
+ // We want to disable this check if there are any XLA tensors.
643
+ // cpu_tensor.copy_(xla_tensor) is valid code.
644
+ if (!({check_any_non_mutated_tensors_are_xla}) && ({check_any_non_mutated_args_are_functional})) {{
645
+ // case 1: trying to mutate a non functional tensor with a functional tensor is an error
646
+ TORCH_INTERNAL_ASSERT(false,
647
+ "mutating a non-functional tensor with a functional tensor is not allowed.",
648
+ " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
649
+ }} else {{
650
+ // case 2: arguments are not functional tensors, so we no-op and redispatch.
651
+ at::AutoDispatchSkipFunctionalize guard;
652
+ {maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
653
+ {return_from_mutable_noop_redispatch(f, 'tmp_output')};
654
+ }}
655
+ }} else {{
656
+ {return_type} tmp_output;
657
+ {{
658
+ at::AutoDispatchSkipFunctionalize guard;
659
+ tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
660
+ }}
661
+ {wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
662
+ }}
663
+ }}"""
664
+
665
+
666
+ # The below functions generate RegisterFunctionalization.cpp
667
+ # These files provide the kernels that run the functionalization pass, which can be opted into
668
+ # per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
669
+
670
+
671
+ # See Note [Functionalization Pass: View Inverses].
672
+ def gen_functionalization_view_inverse_declaration(
673
+ selector: SelectiveBuilder, g: NativeFunctionsViewGroup
674
+ ) -> Optional[str]:
675
+ # For every (non-composite) view op, we need a corresponding "inverse view" function.
676
+ # This generates the declarations so we get a good compiler error when someone adds a new view.
677
+ @with_native_function
678
+ def emit_decl_helper(g: NativeFunctionsViewGroup) -> Optional[str]:
679
+ if g.view.has_composite_implicit_autograd_kernel:
680
+ return None
681
+ view_copy_inverse_sig = ViewInverseSignature(g)
682
+ return view_copy_inverse_sig.decl()
683
+
684
+ return emit_decl_helper(g)
685
+
686
+
687
+ def gen_functionalization_registration(
688
+ selector: SelectiveBuilder,
689
+ g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
690
+ composite_implicit_autograd_index: BackendIndex,
691
+ ) -> List[str]:
692
+ @with_native_function
693
+ def emit_registration_helper(f: NativeFunction) -> str:
694
+ assert not f.has_composite_implicit_autograd_kernel
695
+ registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
696
+ return f'm.impl("{f.func.name}", {registration_str});'
697
+
698
+ # Don't generate kernels in mobile build
699
+ if not selector.include_all_operators:
700
+ return []
701
+
702
+ if isinstance(g, NativeFunctionsViewGroup):
703
+ # functionalization needs to register kernels for view + view_inplace ops
704
+ # See Note [Functionalization <> torch.Tensor constructor]
705
+ if str(g.view.func.name) == "lift_fresh":
706
+ return []
707
+ view_str = []
708
+ if not g.view.has_composite_implicit_autograd_kernel:
709
+ view_str.append(emit_registration_helper(g.view))
710
+ if (
711
+ g.view_inplace is not None
712
+ and not g.view_inplace.has_composite_implicit_autograd_kernel
713
+ ):
714
+ assert g.view_inplace.is_view_op
715
+ view_str.append(emit_registration_helper(g.view_inplace))
716
+ return view_str
717
+
718
+ elif isinstance(g, NativeFunctionsGroup):
719
+ # Gets a hand-written functionalization kernel
720
+ if g.inplace is not None and str(g.inplace.func.name) == "set_.source_Tensor":
721
+ fns = []
722
+ else:
723
+ fns = list(g.functions())
724
+ else:
725
+ if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
726
+ return []
727
+ fns = [g]
728
+
729
+ registrations = []
730
+ for f in fns:
731
+ if f.has_composite_implicit_autograd_kernel:
732
+ continue
733
+ if str(f.func.name) == "lift":
734
+ # See Note [Functionalization <> torch.Tensor constructor]
735
+ return []
736
+ if str(f.func.name) == "resize_":
737
+ # See Note [resize_ in Functionalization]
738
+ return []
739
+ assert not f.is_view_op
740
+ # functionalization needs to generate and register kernels for inplace ops.
741
+ # We *also* need to directly register CompositeImplicitAutograd kernels
742
+ # so that they decompose properly before functionalization.
743
+ if modifies_arguments(f):
744
+ registrations.append(emit_registration_helper(f))
745
+ return registrations
746
+
747
+
748
+ def gen_functionalization_definition(
749
+ selector: SelectiveBuilder,
750
+ # Note: Ideally this code should never have to look at NativeFunction
751
+ # (and instead only need to operate on grouped NativeFunctions).
752
+ # The only reason currently is that we need to emit direct dispatch registrations
753
+ # For CompositeImplicitAutograd operators, which are potentially ungrouped.
754
+ g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
755
+ ) -> List[str]:
756
+ # Don't generate kernels in mobile build
757
+ if not selector.include_all_operators:
758
+ return []
759
+
760
+ if isinstance(g, NativeFunctionsViewGroup):
761
+ # Case 1: emit view -> view_copy kernels for the functionalization pass
762
+ view_defs = []
763
+ if not g.composite:
764
+ # invariant: NativeFunctionsViewGroup's always have a view_copy operator
765
+ # if the view is not composite (implicit autograd)
766
+ assert g.view_copy is not None
767
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
768
+ if g.view_inplace is not None:
769
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
770
+ return view_defs
771
+ elif isinstance(g, NativeFunction):
772
+ # Invariant: all mutable operators that we need to handle in functionalization
773
+ # should have been properly grouped up.
774
+ # TODO: The below ops all have "problematic" schemas that prevent them from
775
+ # getting functionalized. Instead of bending over backwards to get things to work,
776
+ # I think we should either:
777
+ # (1) fix their schemas (BC-breaking)
778
+ # (2) hand-write their functionalization kernels
779
+ if str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
780
+ assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
781
+ return []
782
+ else:
783
+ # Case 2: emit inplace -> out-of-place kernels for the functionalization pass
784
+ mutation_defs = []
785
+ mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
786
+ if g.inplace is not None:
787
+ mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
788
+ if g.mutable is not None:
789
+ mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
790
+ return mutation_defs
791
+ return []
env-llmeval/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py ADDED
@@ -0,0 +1,605 @@
1
+ import argparse
2
+ import os
3
+ import pathlib
4
+ import re
5
+ from collections import Counter, namedtuple
6
+ from typing import (
7
+ Any,
8
+ Callable,
9
+ Dict,
10
+ Iterable,
11
+ Iterator,
12
+ List,
13
+ Optional,
14
+ Sequence,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ )
19
+
20
+ import yaml
21
+
22
+ import torchgen.dest as dest
23
+
24
+ from torchgen.api.lazy import setValueT
25
+ from torchgen.api.types import BaseCppType
26
+ from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
27
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
28
+
29
+ from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
30
+ from torchgen.selective_build.selector import SelectiveBuilder
31
+ from torchgen.utils import concatMap, FileManager, NamespaceHelper
32
+ from torchgen.yaml_utils import YamlLoader
33
+ from .gen_backend_stubs import (
34
+ error_on_missing_kernels,
35
+ gen_dispatcher_registrations,
36
+ gen_dispatchkey_nativefunc_headers,
37
+ parse_backend_yaml,
38
+ )
39
+
40
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
41
+ #
42
+ # Lazy Tensor Codegen
43
+ #
44
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
45
+ # Overview
46
+ # ~~~~~~~~
47
+ #
48
+ # This codegen script builds on existing data models and helpers used
49
+ # by all ATen backends, and adds new functionality specific to lazy
50
+ # tensor backends.
51
+ #
52
+ # Inputs:
53
+ # - <backend>_native_functions.yaml: controls which operators are
54
+ # supported by the backend.
55
+ #
56
+ # Outputs:
57
+ # (for all backends)
58
+ # <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
59
+ # - opt-in: also generate 'lowering' methods for the TorchScript backend only
60
+ # <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
61
+ # - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
62
+ # <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
63
+ # ops
64
+ #
65
+ # Register<DispatchKey>.cpp registers all op implementations with the dispatcher
66
+ # RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
67
+ #
68
+ # Validation Helpers:
69
+ # - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
70
+ # implementations in torch/csrc/lazy/core/shape_inference.*
71
+ # - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
72
+ # (non-codegen) implementation file
73
+ #
74
+ #
75
+ # About the Data Model
76
+ # ~~~~~~~~~~~~~~~~~~~~
77
+ #
78
+ # Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
79
+ # we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
80
+ # (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
81
+ # Backends can list ops in two categories:
82
+ # - `supported` ops require hand-implementations but still get codegenned declarations and registrations
83
+ # - `full_codegen` ops get implementations (and IR classes) generated too
84
+ #
85
+ # Each native function is modeled as an object with a schema, and each schema has objects representing their
86
+ # arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
87
+ # backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
88
+ # types (stringref) with actual string objects, and this is done by manipulating the data model objects.
89
+ # - see api/lazy.py for the lazy data model
90
+ #
91
+ # Once the data model is set up, the rest of this script processes a number of templates for output CPP file
92
+ # and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
93
+ # helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
94
+ #
95
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
96
+
97
+
98
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
99
+ # Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
100
+ ParsedExternalYaml = namedtuple(
101
+ "ParsedExternalYaml",
102
+ ["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
103
+ )
104
+
105
+
106
+ def parse_native_functions_keys(
107
+ backend_yaml_path: str,
108
+ grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
109
+ ) -> Tuple[List[OperatorName], List[Any], List[OperatorName]]:
110
+ native_functions_map: Dict[OperatorName, NativeFunction] = {
111
+ f.func.name: f
112
+ for f in concatMap(
113
+ lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
114
+ grouped_native_functions,
115
+ )
116
+ }
117
+
118
+ with open(backend_yaml_path) as f:
119
+ yaml_values = yaml.load(f, Loader=YamlLoader)
120
+ assert isinstance(yaml_values, dict)
121
+
122
+ full_codegen = yaml_values.pop("full_codegen", [])
123
+ non_native = yaml_values.pop("non_native", [])
124
+ ir_gen = yaml_values.pop("ir_gen", [])
125
+ assert isinstance(full_codegen, list)
126
+ assert isinstance(non_native, list)
127
+ assert isinstance(ir_gen, list)
128
+ full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
129
+ ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
130
+ return full_codegen_opnames, non_native, ir_gen_opnames
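How the three optional sections are pulled out of the backend yaml can be seen on a small inline document (the yaml content below is hypothetical; in the real code the operator names are additionally passed through OperatorName.parse()):

import yaml

backend_yaml = """\
backend: XLA
cpp_namespace: torch_xla
full_codegen:
  - abs
  - add.Tensor
ir_gen:
  - tanh
"""
yaml_values = yaml.safe_load(backend_yaml)
full_codegen = yaml_values.pop("full_codegen", [])
non_native = yaml_values.pop("non_native", [])
ir_gen = yaml_values.pop("ir_gen", [])
print(full_codegen, non_native, ir_gen)
# ['abs', 'add.Tensor'] [] ['tanh']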
131
+
132
+
133
+ def validate_shape_inference_header(
134
+ shape_inference_hdr: str, expected_shape_infr_decls: List[str]
135
+ ) -> None:
136
+ try:
137
+ with open(shape_inference_hdr) as f:
138
+ shape_infr_decls = f.read()
139
+ shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
140
+ except OSError as e:
141
+ raise AssertionError(
142
+ f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
143
+ ) from e
144
+
145
+ shape_infr_regex = r"compute_shape_(\w+)"
146
+ actual_shape_infr_name_counts = Counter(
147
+ re.findall(shape_infr_regex, shape_infr_decls)
148
+ )
149
+ # TODO(whc) add a check for shape inference functions that have meta kernels implemented and should be retired.
150
+
151
+ missing_decls = [
152
+ decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
153
+ ]
154
+ if missing_decls:
155
+ raise Exception(
156
+ f"""Missing shape inference function.\n
157
+ Please declare this function in {shape_inference_hdr}:\n
158
+ and implement it in the corresponding shape_inference.cpp file.\n
159
+ {os.linesep.join(missing_decls)}"""
160
+ )
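The header check above boils down to a regex scan plus an exact line-membership test against the expected declarations. A self-contained illustration of the scan (the header content below is made up):

import re
from collections import Counter

header = """\
TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_trace(const at::Tensor & self);
"""
print(Counter(re.findall(r"compute_shape_(\w+)", header)))
# Counter({'abs': 1, 'trace': 1})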
161
+
162
+
163
+ # Some helper functions for the codegen.
164
+ def get_ltc_helper_fns() -> str:
165
+ return """\
166
+ at::Tensor to_meta(const at::Tensor& tensor) {
167
+ // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
168
+ if (!tensor.defined()) return tensor;
169
+ auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \
170
+ /*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), \
171
+ /*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
172
+ // needs to handle wrapped numbers, so dtype promotion works properly.
173
+ if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
174
+ out.unsafeGetTensorImpl()->set_wrapped_number(true);
175
+ }
176
+ return out;
177
+ }
178
+ c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
179
+ if (tensor.has_value()) {
180
+ return to_meta(*tensor);
181
+ }
182
+ return c10::nullopt;
183
+ }
184
+
185
+ std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
186
+ std::vector<at::Tensor> outs;
187
+ outs.reserve(t_list.size());
188
+ for (const auto& tensor : t_list) {
189
+ outs.push_back(to_meta(tensor));
190
+ }
191
+ return outs;
192
+ }
193
+ """
194
+
195
+
196
+ class default_args:
197
+ node_base: str = "Node"
198
+ node_base_hdr: Optional[str] = None
199
+ shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
200
+ tensor_class: str = "torch::lazy::LazyTensor"
201
+ tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
202
+ lazy_ir_generator: Type[GenLazyIR] = GenLazyIR
203
+ native_func_definition_generator: Type[
204
+ GenLazyNativeFuncDefinition
205
+ ] = GenLazyNativeFuncDefinition
206
+ backend_name: str = "TorchScript"
207
+
208
+
209
+ def main() -> None:
210
+ parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
211
+ parser.add_argument(
212
+ "-s",
213
+ "--source-yaml",
214
+ "--source_yaml",
215
+ help="path to source yaml file containing operator external definitions",
216
+ )
217
+ parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
218
+ parser.add_argument(
219
+ "--dry-run", "--dry_run", type=bool, default=False, help="output directory"
220
+ )
221
+ parser.add_argument(
222
+ "--impl-path",
223
+ "--impl_path",
224
+ type=str,
225
+ default=None,
226
+ help="path to the source C++ file containing kernel definitions",
227
+ )
228
+ parser.add_argument(
229
+ "--gen-ts-lowerings",
230
+ "--gen_ts_lowerings",
231
+ action="store_true",
232
+ help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
233
+ )
234
+ parser.add_argument(
235
+ "--node-base",
236
+ "--node_base",
237
+ type=str,
238
+ default=default_args.node_base,
239
+ help="Name of backend specific custom Lazy IR Node base class",
240
+ )
241
+ parser.add_argument(
242
+ "--node-base-hdr",
243
+ "--node_base_hdr",
244
+ type=str,
245
+ default=default_args.node_base_hdr,
246
+ help="Path to header file defining custom Lazy IR Node base class",
247
+ )
248
+ parser.add_argument(
249
+ "--shape-inference-hdr",
250
+ "--shape_inference_hdr",
251
+ type=str,
252
+ default=default_args.shape_inference_hdr,
253
+ help="Path to header file defining custom Lazy shape inference functions",
254
+ )
255
+ parser.add_argument(
256
+ "--tensor-class",
257
+ "--tensor_class",
258
+ type=str,
259
+ default=default_args.tensor_class,
260
+ help="Name of backend specific custom Lazy Tensor class",
261
+ )
262
+ parser.add_argument(
263
+ "--tensor-class-hdr",
264
+ "--tensor_class_hdr",
265
+ type=str,
266
+ default=default_args.tensor_class_hdr,
267
+ help="Path to header file defining custom Lazy Tensor class",
268
+ )
269
+ parser.add_argument(
270
+ "--backend-name",
271
+ "--backend_name",
272
+ type=str,
273
+ default=default_args.backend_name,
274
+ help="Name of the backend to generate",
275
+ )
276
+ options = parser.parse_args()
277
+
278
+ # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
279
+ torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
280
+ aten_path = str(torch_root / "aten" / "src" / "ATen")
281
+ lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
282
+ if options.gen_ts_lowerings:
283
+ lazy_ir_generator = GenTSLazyIR
284
+ native_func_definition_generator: Type[
285
+ GenLazyNativeFuncDefinition
286
+ ] = default_args.native_func_definition_generator
287
+
288
+ run_gen_lazy_tensor(
289
+ aten_path,
290
+ options.source_yaml,
291
+ options.output_dir,
292
+ options.dry_run,
293
+ options.impl_path,
294
+ options.node_base,
295
+ options.node_base_hdr,
296
+ options.tensor_class,
297
+ options.tensor_class_hdr,
298
+ options.shape_inference_hdr,
299
+ lazy_ir_generator,
300
+ native_func_definition_generator,
301
+ options.backend_name,
302
+ )
303
+
304
+
305
+ def run_gen_lazy_tensor(
306
+ aten_path: str,
307
+ source_yaml: str,
308
+ output_dir: str,
309
+ dry_run: bool,
310
+ impl_path: Optional[str],
311
+ node_base: str = default_args.node_base,
312
+ node_base_hdr: Optional[str] = default_args.node_base_hdr,
313
+ tensor_class: str = default_args.tensor_class,
314
+ tensor_class_hdr: str = default_args.tensor_class_hdr,
315
+ shape_inference_hdr: str = default_args.shape_inference_hdr,
316
+ lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator,
317
+ native_func_definition_generator: Type[
318
+ GenLazyNativeFuncDefinition
319
+ ] = default_args.native_func_definition_generator,
320
+ # build_in_tree is true for TS backend and affects include paths
321
+ build_in_tree: bool = False,
322
+ # per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
323
+ # it must match how ATen was built
324
+ per_operator_headers: bool = False,
325
+ backend_name: str = default_args.backend_name,
326
+ gen_forced_fallback_code: bool = False,
327
+ use_lazy_shape: bool = True,
328
+ # the following arguments are temporary customization points for xla backend migration.
329
+ # do not rely on them otherwise, they should be removed once migration is complete
330
+ backend_namespace: str = "torch::lazy",
331
+ get_tensorlist: str = "GetTensorList",
332
+ get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
333
+ try_get_tensor: str = "TryGetLtcTensor",
334
+ metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
335
+ create_tensor: str = "LazyTensor::Create",
336
+ create_from_first_tensor: bool = False,
337
+ create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
338
+ tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
339
+ lazy_value_class: str = "torch::lazy::Value",
340
+ lazy_tensor_ptr: str = "LazyTensorPtr",
341
+ get_device_fn: str = "torch::lazy::GetBackendDevice",
342
+ ) -> None:
343
+ lv_tokens = lazy_value_class.split("::")
344
+ lv_class = lv_tokens[-1]
345
+ lv_ns = "::".join(lv_tokens[:-1])
346
+ setValueT(BaseCppType(lv_ns, lv_class))
347
+ template_dir = os.path.join(aten_path, "templates")
348
+
349
+ def make_file_manager(install_dir: str) -> FileManager:
350
+ return FileManager(
351
+ install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
352
+ )
353
+
354
+ fm = make_file_manager(output_dir)
355
+
356
+ native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
357
+ tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
358
+ parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
359
+ native_functions, backend_indices = (
360
+ parsed_yaml.native_functions,
361
+ parsed_yaml.backend_indices,
362
+ )
363
+ grouped_native_functions = get_grouped_native_functions(native_functions)
364
+
365
+ def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
366
+ """
367
+ We sort the native function because of the note in concat_map_codegen.
368
+ TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
369
+ """
370
+ func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
371
+ return str(func.name.name)
372
+
373
+ grouped_native_functions = sorted(
374
+ grouped_native_functions, key=sort_native_function
375
+ )
376
+
377
+ parsed_backend_yaml = parse_backend_yaml(
378
+ source_yaml, grouped_native_functions, backend_indices
379
+ )
380
+ backend_key = parsed_backend_yaml.backend_key
381
+ autograd_key = parsed_backend_yaml.autograd_key
382
+ cpp_namespace = parsed_backend_yaml.cpp_namespace
383
+ backend_indices = parsed_backend_yaml.backend_indices
384
+ # the following 3 keys are all processed differently
385
+ # for full_codegen, we generate IR, kernels, etc
386
+ # for ir_gen, we generate only IR
387
+ # non_native is used to register kernels not declared in
388
+ # native_functions.yaml
389
+ full_codegen, non_native, ir_gen = parse_native_functions_keys(
390
+ source_yaml, grouped_native_functions
391
+ )
392
+
393
+ def concat_map_codegen(
394
+ func: Callable[[NativeFunction], Sequence[str]],
395
+ xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
396
+ ops_list: List[OperatorName] = full_codegen,
397
+ ) -> Iterator[str]:
398
+ """
399
+ We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
400
+ only code-gen additional entries for the inplace variant for the native functions.
401
+ """
402
+
403
+ for x in xs:
404
+ fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
405
+ for f in fs:
406
+ if f.func.name in ops_list:
407
+ yield from func(f)
408
+
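concat_map_codegen() is a filtered concat-map over possibly-grouped functions. The same control flow on plain strings (the group structure and op names are hypothetical stand-ins for NativeFunctionsGroup/NativeFunction objects):

from typing import Callable, Iterable, Iterator, List, Sequence

def sketch_concat_map(
    func: Callable[[str], Sequence[str]],
    groups: Iterable[List[str]],
    ops_list: List[str],
) -> Iterator[str]:
    for group in groups:   # a "group" stands in for a NativeFunctionsGroup
        for op in group:   # each op stands in for a NativeFunction
            if op in ops_list:
                yield from func(op)

print(list(sketch_concat_map(lambda op: [f"// decl for {op}"], [["abs", "add"], ["tanh"]], ["abs", "tanh"])))
# ['// decl for abs', '// decl for tanh']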
409
+ selector = SelectiveBuilder.get_nop_selector()
410
+
411
+ assert backend_key is not None
412
+ class_name = backend_indices[backend_key].native_function_class_name()
413
+
414
+ if impl_path is not None:
415
+ error_on_missing_kernels(
416
+ native_functions,
417
+ backend_indices,
418
+ backend_key,
419
+ autograd_key,
420
+ class_name,
421
+ impl_path,
422
+ full_codegen,
423
+ )
424
+
425
+ """ Validate Shape Inference Definitions
426
+
427
+ Generated lazy native functions all perform shape inference, by first using a meta:: kernel
428
+ if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
429
+ knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
430
+ so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
431
+ to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
432
+ the expected signature which can be copy-pasted into shape_inference.h.
433
+
434
+ compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
435
+ to structured kernels.
436
+
437
+ See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
438
+ """
439
+ if shape_inference_hdr is not None:
440
+ expected_shape_infr_decls = list(
441
+ concat_map_codegen(
442
+ dest.GenLazyShapeInferenceDefinition(
443
+ backend_indices[backend_key], tensor_class
444
+ ),
445
+ grouped_native_functions,
446
+ )
447
+ )
448
+
449
+ validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
450
+ assert class_name is not None
451
+
452
+ # Generate nativefunction declarations
453
+ # Note, eager registrations is set to False for the lazy TS backend as another LTC backend
454
+ # may want to register their own lazy kernels instead of registering the TS ones.
455
+ # The registration will lazily happen when init_ts_backend is called.
456
+ gen_dispatchkey_nativefunc_headers(
457
+ fm,
458
+ class_name,
459
+ cpp_namespace,
460
+ backend_indices,
461
+ grouped_native_functions,
462
+ backend_key,
463
+ autograd_key,
464
+ backend_name,
465
+ )
466
+
467
+ # Generate dispatcher registrations, which hook up the native functions
468
+ for dispatch_key in (
469
+ [backend_key] if autograd_key is None else [backend_key, autograd_key]
470
+ ):
471
+ gen_dispatcher_registrations(
472
+ fm,
473
+ output_dir,
474
+ class_name,
475
+ backend_indices,
476
+ grouped_native_functions,
477
+ backend_key,
478
+ dispatch_key,
479
+ selector,
480
+ build_in_tree=build_in_tree,
481
+ per_operator_headers=per_operator_headers,
482
+ backend_name=backend_name,
483
+ eager_registration=False,
484
+ )
485
+
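A quick check of the dispatch-key selection driving the loop above (the key values are illustrative placeholders; real values come from the backend yaml):

backend_key, autograd_key = "Lazy", "AutogradLazy"  # illustrative values only
keys = [backend_key] if autograd_key is None else [backend_key, autograd_key]
assert keys == ["Lazy", "AutogradLazy"]

autograd_key = None
keys = [backend_key] if autograd_key is None else [backend_key, autograd_key]
assert keys == ["Lazy"]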
486
+ # Generate native function impls that build IR nodes
487
+ ns_helper = NamespaceHelper(cpp_namespace)
488
+ fm.write_with_template(
489
+ f"{backend_key}NativeFunctions.cpp",
490
+ "DispatchKeyNativeFunctions.cpp",
491
+ lambda: {
492
+ "includes": [
493
+ f"#include <{path}>"
494
+ for path in [
495
+ tensor_class_hdr,
496
+ shape_inference_hdr,
497
+ "ATen/Functions.h",
498
+ "ATen/native/TensorConversions.h",
499
+ "ATen/NativeFunctions.h",
500
+ "ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
501
+ "ATen/MetaFunctions.h",
502
+ "ATen/Operators.h",
503
+ "ATen/native/CPUFallback.h",
504
+ "torch/csrc/lazy/core/ir_builder.h",
505
+ "torch/csrc/lazy/core/lazy_graph_executor.h",
506
+ "torch/csrc/lazy/core/metrics.h",
507
+ "torch/csrc/lazy/core/shape.h",
508
+ f"{output_dir}/{backend_key}NativeFunctions.h",
509
+ f"{output_dir}/LazyIr.h",
510
+ ]
511
+ + (
512
+ ["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
513
+ if gen_forced_fallback_code
514
+ else []
515
+ )
516
+ ],
517
+ "helper_fns": get_ltc_helper_fns(),
518
+ "native_functions_include": "",
519
+ "namespace_prologue": ns_helper.prologue,
520
+ "namespace_epilogue": ns_helper.epilogue,
521
+ "native_function_definitions": list(
522
+ concat_map_codegen(
523
+ native_func_definition_generator(
524
+ f"{backend_key}NativeFunctions",
525
+ backend_indices[backend_key],
526
+ tensor_class,
527
+ gen_forced_fallback_code,
528
+ backend_namespace,
529
+ get_tensorlist,
530
+ get_tensor_or_wrap_number,
531
+ try_get_tensor,
532
+ metrics_counter,
533
+ create_tensor,
534
+ create_from_first_tensor,
535
+ create_aten_from_ltc_tensor,
536
+ tuple_aten_from_ltc_tensors,
537
+ lazy_tensor_ptr,
538
+ get_device_fn,
539
+ ),
540
+ grouped_native_functions,
541
+ )
542
+ ),
543
+ },
544
+ )
545
+ # Generate IR node classes
546
+ lazy_ir_obj = lazy_ir_generator(
547
+ backend_indices[backend_key], backend_name, node_base, use_lazy_shape
548
+ )
549
+
550
+ fm.write_with_template(
551
+ "LazyIr.h",
552
+ "LazyIr.h",
553
+ lambda: {
554
+ "lazy_ir_sysinc": [
555
+ f"#include <{path}>"
556
+ for path in [
557
+ "ATen/core/Formatting.h",
558
+ "c10/core/ScalarType.h",
559
+ "c10/util/Optional.h",
560
+ "torch/csrc/lazy/core/hash.h",
561
+ "torch/csrc/lazy/core/ir.h",
562
+ "torch/csrc/lazy/core/shape.h",
563
+ "vector",
564
+ ]
565
+ ],
566
+ "lazy_ir_inc": [f'#include "{node_base_hdr}"']
567
+ if node_base_hdr is not None
568
+ else [],
569
+ "ir_declarations": list(
570
+ concat_map_codegen(
571
+ lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
572
+ )
573
+ ),
574
+ "namespace_prologue": ns_helper.prologue,
575
+ "namespace_epilogue": ns_helper.epilogue,
576
+ },
577
+ )
578
+
579
+ # Generate Non Native IR Node classes
580
+ fm.write_with_template(
581
+ "LazyNonNativeIr.h",
582
+ "LazyNonNativeIr.h",
583
+ lambda: {
584
+ "lazy_non_native_ir_inc": [
585
+ f"#include <{path}>"
586
+ for path in [
587
+ "torch/csrc/lazy/core/ir.h",
588
+ "torch/csrc/lazy/core/ir_builder.h",
589
+ "torch/csrc/lazy/core/internal_ops/ltc_ops.h",
590
+ "torch/csrc/lazy/core/shape_inference.h",
591
+ ]
592
+ + ([node_base_hdr] if node_base_hdr else [])
593
+ if path
594
+ ],
595
+ "non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
596
+ non_native, lazy_ir_obj
597
+ ),
598
+ "namespace_prologue": ns_helper.prologue,
599
+ "namespace_epilogue": ns_helper.epilogue,
600
+ },
601
+ )
602
+
603
+
604
+ if __name__ == "__main__":
605
+ main()
env-llmeval/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py ADDED
@@ -0,0 +1,265 @@
1
+ import textwrap
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional, Sequence, Tuple
4
+
5
+ from torchgen.api.translate import translate
6
+ from torchgen.api.types import DispatcherSignature
7
+ from torchgen.context import method_with_native_function
8
+ from torchgen.model import (
9
+ Argument,
10
+ BaseTy,
11
+ BaseType,
12
+ FunctionSchema,
13
+ ListType,
14
+ NativeFunction,
15
+ OptionalType,
16
+ Return,
17
+ SchemaKind,
18
+ Type,
19
+ )
20
+ from torchgen.utils import mapMaybe
21
+
22
+
23
+ def is_tensor(typ: Type) -> bool:
24
+ return isinstance(typ, BaseType) and typ.name == BaseTy.Tensor
25
+
26
+
27
+ def is_optional_tensor(typ: Type) -> bool:
28
+ return isinstance(typ, OptionalType) and is_tensor(typ.elem)
29
+
30
+
31
+ def is_tensor_list(typ: Type) -> bool:
32
+ return isinstance(typ, ListType) and is_tensor(typ.elem)
33
+
34
+
35
+ def unwrap_tensor(name: str, cur_level_var: str) -> List[str]:
36
+ result = f"""\
37
+ Tensor {name}_value;
38
+ optional<int64_t> {name}_bdim;
39
+ std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, {cur_level_var});"""
40
+ return textwrap.dedent(result).split("\n")
41
+
42
+
43
+ def unwrap_optional_tensor(name: str, cur_level_var: str) -> List[str]:
44
+ result = f"""\
45
+ optional<Tensor> {name}_value;
46
+ optional<int64_t> {name}_bdim;
47
+ if ({name}) {{
48
+ std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), {cur_level_var});
49
+ }}"""
50
+ return textwrap.dedent(result).split("\n")
51
+
52
+
53
+ def gen_unwraps(
54
+ flat_arguments: Sequence[Argument], cur_level_var: str
55
+ ) -> Tuple[str, List[str]]:
56
+ arg_names = [a.name for a in flat_arguments]
57
+ arg_types = [a.type for a in flat_arguments]
58
+
59
+ tensors = [name for typ, name in zip(arg_types, arg_names) if is_tensor(typ)]
60
+ optional_tensors = [
61
+ name for typ, name in zip(arg_types, arg_names) if is_optional_tensor(typ)
62
+ ]
63
+
64
+ unwraps = []
65
+ for tensor in tensors:
66
+ unwraps += unwrap_tensor(tensor, cur_level_var)
67
+
68
+ for opt_tensor in optional_tensors:
69
+ unwraps += unwrap_optional_tensor(opt_tensor, cur_level_var)
70
+ unwrap_code = "\n".join(unwraps)
71
+
72
+ unwrapped_arg_list = []
73
+ for arg in arg_names:
74
+ if arg in tensors or arg in optional_tensors:
75
+ unwrapped_arg_list += [f"{arg}_value", f"{arg}_bdim"]
76
+ else:
77
+ unwrapped_arg_list.append(arg)
78
+ return unwrap_code, unwrapped_arg_list
79
+
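To make the output of gen_unwraps concrete, here is a hand expansion for a hypothetical schema with arguments (self: Tensor, dim: int), written out from the templates above (whitespace simplified):

# Hand-expanded from unwrap_tensor() for a Tensor argument named "self";
# "dim" passes through untouched because it is not tensor-like.
unwrap_code = """\
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);"""
unwrapped_arg_list = ["self_value", "self_bdim", "dim"]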
80
+
81
+ def gen_case_where_all_bdims_are_none(
82
+ outer_sig: DispatcherSignature, schema: FunctionSchema, cur_level_var: str
83
+ ) -> str:
84
+ conditions = []
85
+ flat_args = schema.arguments.flat_all
86
+ for arg in flat_args:
87
+ if not arg.type.is_tensor_like():
88
+ continue
89
+ conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")
90
+
91
+ sig = DispatcherSignature.from_schema(schema)
92
+ translated_args = ", ".join(
93
+ e.expr for e in translate(outer_sig.arguments(), sig.arguments())
94
+ )
95
+ return f"""\
96
+ if ({' && '.join(conditions)}) {{
97
+ return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
98
+ }}"""
99
+
100
+
101
+ def gen_returns(
102
+ returns: Tuple[Return, ...], cur_level_var: str, results_var: str
103
+ ) -> str:
104
+ idx = 0
105
+ wrapped_returns = []
106
+ for ret in returns:
107
+ if is_tensor(ret.type):
108
+ wrapped_returns.append(
109
+ f"makeBatched(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
110
+ )
111
+ idx += 2
112
+ elif is_tensor_list(ret.type):
113
+ wrapped_returns.append(
114
+ f"makeBatchedVector(std::get<{idx}>({results_var}), std::get<{idx+1}>({results_var}), {cur_level_var})"
115
+ )
116
+ idx += 2
117
+ else:
118
+ wrapped_returns.append(f"std::get<{idx}>({results_var})")
119
+ idx += 1
120
+ if len(wrapped_returns) == 1:
121
+ result = f"return {wrapped_returns[0]};"
122
+ else:
123
+ result = f'return std::make_tuple({", ".join(wrapped_returns)});'
124
+ return result
125
+
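Hand-expanding gen_returns for a hypothetical op returning (Tensor, Tensor): the batch rule returns a flat tuple of (value, bdim) pairs, so the index advances by two per Tensor return:

# What gen_returns builds for two Tensor returns (illustrative expansion).
wrapped = (
    "return std::make_tuple("
    "makeBatched(std::get<0>(results), std::get<1>(results), cur_level), "
    "makeBatched(std::get<2>(results), std::get<3>(results), cur_level));"
)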
126
+
127
+ def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
128
+ return any(a.type.is_tensor_like() for a in schema.arguments.flat_all)
129
+
130
+
131
+ def is_mutated_arg(argument: Argument) -> bool:
132
+ return argument.annotation is not None and argument.annotation.is_write
133
+
134
+
135
+ def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> Optional[str]:
136
+ # Assumptions:
137
+ # - only one argument is being modified in-place
138
+ # - the argument that is being modified in-place is the first argument
139
+ # - all returns are either Tensor, tuple of Tensor, or TensorList
140
+ schema = native_function.func
141
+ sig = DispatcherSignature.from_schema(schema)
142
+ returns = schema.returns
143
+
144
+ # Check assumptions. If these are invalid we return None
145
+ # and leave handling them to future work.
146
+ assert schema.kind() == SchemaKind.inplace
147
+ if not is_mutated_arg(schema.arguments.flat_all[0]):
148
+ return None
149
+ if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
150
+ return None
151
+
152
+ # Only support cases where all returns are Tensors or vector<Tensor>
153
+ if len(returns) == 0:
154
+ return None
155
+ if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
156
+ return None
157
+ if not accepts_at_least_one_tensor_input(schema):
158
+ return None
159
+
160
+ cur_level_var = "cur_level"
161
+
162
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
163
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
164
+
165
+ return f"""\
166
+ template <typename batch_rule_t, batch_rule_t batch_rule>
167
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
168
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
169
+ auto maybe_layer = maybeCurrentDynamicLayer();
170
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
171
+ int64_t {cur_level_var} = maybe_layer->layerId();
172
+ {textwrap.indent(bdims_all_none_case, " ")}
173
+ {textwrap.indent(unwraps, " ")}
174
+ batch_rule({', '.join(unwrapped_arg_list)});
175
+ return {schema.arguments.flat_all[0].name};
176
+ }}"""
177
+
178
+
179
+ def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
180
+ schema = native_function.func
181
+ sig = DispatcherSignature.from_schema(schema)
182
+ cur_level_var = "cur_level"
183
+
184
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
185
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
186
+
187
+ return f"""\
188
+ template <typename batch_rule_t, batch_rule_t batch_rule>
189
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
190
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
191
+ auto maybe_layer = maybeCurrentDynamicLayer();
192
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
193
+ int64_t {cur_level_var} = maybe_layer->layerId();
194
+ {textwrap.indent(bdims_all_none_case, " ")}
195
+ {textwrap.indent(unwraps, " ")}
196
+ batch_rule({', '.join(unwrapped_arg_list)});
197
+ }}"""
198
+
199
+
200
+ def gen_vmap_plumbing(native_function: NativeFunction) -> Optional[str]:
201
+ schema = native_function.func
202
+ sig = DispatcherSignature.from_schema(schema)
203
+ returns = schema.returns
204
+
205
+ # Only support cases where all returns are Tensors or vector<Tensor>
206
+ if not accepts_at_least_one_tensor_input(schema):
207
+ return None
208
+ if len(returns) == 0:
209
+ return gen_vmap_plumbing_no_returns(native_function)
210
+ if not all(ret.type.is_tensor_like() for ret in returns):
211
+ return None
212
+ # in-place views need special handling
213
+ if "inplace_view" in native_function.tags:
214
+ return None
215
+
216
+ if schema.kind() == SchemaKind.inplace:
217
+ return gen_vmap_inplace_plumbing(native_function)
218
+
219
+ # Don't support these (mutable, out, scratch)
220
+ if schema.kind() != SchemaKind.functional:
221
+ return None
222
+
223
+ results_var = "results"
224
+ cur_level_var = "cur_level"
225
+
226
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
227
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
228
+
229
+ wrapped_returns = gen_returns(returns, cur_level_var, results_var)
230
+ return f"""\
231
+ template <typename batch_rule_t, batch_rule_t batch_rule>
232
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
233
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
234
+ auto maybe_layer = maybeCurrentDynamicLayer();
235
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
236
+ int64_t {cur_level_var} = maybe_layer->layerId();
237
+ {textwrap.indent(bdims_all_none_case, " ")}
238
+ {textwrap.indent(unwraps, " ")}
239
+ auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
240
+ {wrapped_returns}
241
+ }}"""
242
+
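Putting the pieces together, the template returned by gen_vmap_plumbing expands to C++ roughly like the following for a hypothetical functional op abs(Tensor self) -> Tensor (the exact declaration comes from DispatcherSignature; this expansion is written out by hand from the f-string above):

# Hand-expanded output for illustration; the real signature is produced by sig.decl(...).
generated_plumbing_example = """\
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor abs_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::abs::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}"""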
243
+
244
+ @dataclass(frozen=True)
245
+ class ComputeBatchRulePlumbing:
246
+ @method_with_native_function
247
+ def __call__(self, f: NativeFunction) -> Optional[str]:
248
+ opname = str(f.func.name)
249
+ result = gen_vmap_plumbing(f)
250
+ return result
251
+
252
+
253
+ def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
254
+ body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
255
+ return f"""
256
+ #pragma once
257
+ #include <ATen/Operators.h>
258
+ #include <ATen/functorch/PlumbingHelper.h>
259
+
260
+ namespace at {{ namespace functorch {{
261
+
262
+ {body}
263
+
264
+ }}}} // namespace at::functorch
265
+ """