applied-ai-018 committed
Commit 93e2cf6 · verified · 1 Parent(s): 956e414

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/__version__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_base.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_common.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_const.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_filename.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_filepath.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_ltsv.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_symbol.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_types.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/argparse.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/click.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/error.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/handler.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pathvalidate/_common.py +140 -0
  16. env-llmeval/lib/python3.10/site-packages/pathvalidate/_filename.py +466 -0
  17. env-llmeval/lib/python3.10/site-packages/pathvalidate/_filepath.py +516 -0
  18. env-llmeval/lib/python3.10/site-packages/pathvalidate/_ltsv.py +43 -0
  19. env-llmeval/lib/python3.10/site-packages/pkg_resources/__init__.py +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py +608 -0
  26. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +136 -0
  35. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py +484 -0
  36. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py +136 -0
  37. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py +504 -0
  38. env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pkg_resources/extern/__init__.py +73 -0
  40. env-llmeval/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py +6 -0
  43. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/INSTALLER +1 -0
  44. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/LICENSE.txt +208 -0
  45. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/METADATA +1076 -0
  46. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/RECORD +15 -0
  47. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/WHEEL +6 -0
  48. env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/top_level.txt +1 -0
  49. env-llmeval/lib/python3.10/site-packages/sacrebleu/__init__.py +43 -0
  50. env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py +11 -0
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.62 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/__version__.cpython-310.pyc ADDED
Binary file (383 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_base.cpython-310.pyc ADDED
Binary file (7.66 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_common.cpython-310.pyc ADDED
Binary file (3.91 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_const.cpython-310.pyc ADDED
Binary file (797 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_filename.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_filepath.cpython-310.pyc ADDED
Binary file (14.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_ltsv.cpython-310.pyc ADDED
Binary file (1.45 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_symbol.cpython-310.pyc ADDED
Binary file (2.28 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/_types.cpython-310.pyc ADDED
Binary file (360 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/argparse.cpython-310.pyc ADDED
Binary file (1.28 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/click.cpython-310.pyc ADDED
Binary file (1.28 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/error.cpython-310.pyc ADDED
Binary file (7.89 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/__pycache__/handler.cpython-310.pyc ADDED
Binary file (4 kB).
 
env-llmeval/lib/python3.10/site-packages/pathvalidate/_common.py ADDED
@@ -0,0 +1,140 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
+"""
+
+import platform
+import re
+import string
+from pathlib import PurePath
+from typing import Any, List, Optional
+
+from ._const import Platform
+from ._types import PathType, PlatformType
+
+
+_re_whitespaces = re.compile(r"^[\s]+$")
+
+
+def validate_pathtype(
+    text: PathType, allow_whitespaces: bool = False, error_msg: Optional[str] = None
+) -> None:
+    from .error import ErrorReason, ValidationError
+
+    if _is_not_null_string(text) or isinstance(text, PurePath):
+        return
+
+    if allow_whitespaces and _re_whitespaces.search(str(text)):
+        return
+
+    if is_null_string(text):
+        raise ValidationError(reason=ErrorReason.NULL_NAME)
+
+    raise TypeError(f"text must be a string: actual={type(text)}")
+
+
+def to_str(name: PathType) -> str:
+    if isinstance(name, PurePath):
+        return str(name)
+
+    return name
+
+
+def is_null_string(value: Any) -> bool:
+    if value is None:
+        return True
+
+    try:
+        return len(value.strip()) == 0
+    except AttributeError:
+        return False
+
+
+def _is_not_null_string(value: Any) -> bool:
+    try:
+        return len(value.strip()) > 0
+    except AttributeError:
+        return False
+
+
+def _get_unprintable_ascii_chars() -> List[str]:
+    return [chr(c) for c in range(128) if chr(c) not in string.printable]
+
+
+unprintable_ascii_chars = tuple(_get_unprintable_ascii_chars())
+
+
+def _get_ascii_symbols() -> List[str]:
+    symbol_list: List[str] = []
+
+    for i in range(128):
+        c = chr(i)
+
+        if c in unprintable_ascii_chars or c in string.digits + string.ascii_letters:
+            continue
+
+        symbol_list.append(c)
+
+    return symbol_list
+
+
+ascii_symbols = tuple(_get_ascii_symbols())
+
+__RE_UNPRINTABLE_CHARS = re.compile(
+    "[{}]".format(re.escape("".join(unprintable_ascii_chars))), re.UNICODE
+)
+__RE_ANSI_ESCAPE = re.compile(
+    r"(?:\x1B[@-Z\\-_]|[\x80-\x9A\x9C-\x9F]|(?:\x1B\[|\x9B)[0-?]*[ -/]*[@-~])"
+)
+
+
+def validate_unprintable_char(text: str) -> None:
+    from .error import InvalidCharError
+
+    match_list = __RE_UNPRINTABLE_CHARS.findall(to_str(text))
+    if match_list:
+        raise InvalidCharError(f"unprintable character found: {match_list}")
+
+
+def replace_unprintable_char(text: str, replacement_text: str = "") -> str:
+    try:
+        return __RE_UNPRINTABLE_CHARS.sub(replacement_text, text)
+    except (TypeError, AttributeError):
+        raise TypeError("text must be a string")
+
+
+def replace_ansi_escape(text: str, replacement_text: str = "") -> str:
+    try:
+        return __RE_ANSI_ESCAPE.sub(replacement_text, text)
+    except (TypeError, AttributeError):
+        raise TypeError("text must be a string")
+
+
+def normalize_platform(name: Optional[PlatformType]) -> Platform:
+    if isinstance(name, Platform):
+        return name
+
+    if not name:
+        return Platform.UNIVERSAL
+
+    name = name.strip().casefold()
+
+    if name == "posix":
+        return Platform.POSIX
+
+    if name == "auto":
+        name = platform.system().casefold()
+
+    if name in ["linux"]:
+        return Platform.LINUX
+
+    if name and name.startswith("win"):
+        return Platform.WINDOWS
+
+    if name in ["mac", "macos", "darwin"]:
+        return Platform.MACOS
+
+    return Platform.UNIVERSAL
+
+
+def findall_to_str(match: List[Any]) -> str:
+    return ", ".join([repr(text) for text in match])
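Note: the helpers above are internal to pathvalidate and are imported by the other modules in this diff. A minimal sketch of how they behave, assuming the package installed in this environment matches the code above (the module is private, and the sample strings and printed results are illustrative only):

    # Illustrative sketch only: exercises the private helpers added in _common.py above.
    from pathvalidate._common import normalize_platform, replace_ansi_escape, replace_unprintable_char

    print(normalize_platform(None))                     # Platform.UNIVERSAL
    print(normalize_platform("auto"))                   # platform of the running interpreter
    print(replace_unprintable_char("a\x00b", "_"))      # 'a_b' (NUL is unprintable)
    print(replace_ansi_escape("\x1b[31mred\x1b[0m"))    # 'red' (ANSI escape codes stripped)
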
env-llmeval/lib/python3.10/site-packages/pathvalidate/_filename.py ADDED
@@ -0,0 +1,466 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
+"""
+
+import itertools
+import ntpath
+import posixpath
+import re
+import warnings
+from pathlib import Path, PurePath
+from typing import Optional, Pattern, Sequence, Tuple
+
+from ._base import AbstractSanitizer, AbstractValidator, BaseFile, BaseValidator
+from ._common import findall_to_str, to_str, validate_pathtype
+from ._const import DEFAULT_MIN_LEN, INVALID_CHAR_ERR_MSG_TMPL, Platform
+from ._types import PathType, PlatformType
+from .error import ErrorAttrKey, ErrorReason, InvalidCharError, ValidationError
+from .handler import ReservedNameHandler, ValidationErrorHandler
+
+
+_DEFAULT_MAX_FILENAME_LEN = 255
+_RE_INVALID_FILENAME = re.compile(f"[{re.escape(BaseFile._INVALID_FILENAME_CHARS):s}]", re.UNICODE)
+_RE_INVALID_WIN_FILENAME = re.compile(
+    f"[{re.escape(BaseFile._INVALID_WIN_FILENAME_CHARS):s}]", re.UNICODE
+)
+
+
+class FileNameSanitizer(AbstractSanitizer):
+    def __init__(
+        self,
+        max_len: int = _DEFAULT_MAX_FILENAME_LEN,
+        fs_encoding: Optional[str] = None,
+        platform: Optional[PlatformType] = None,
+        null_value_handler: Optional[ValidationErrorHandler] = None,
+        reserved_name_handler: Optional[ValidationErrorHandler] = None,
+        additional_reserved_names: Optional[Sequence[str]] = None,
+        validate_after_sanitize: bool = False,
+        validator: Optional[AbstractValidator] = None,
+    ) -> None:
+        if validator:
+            fname_validator = validator
+        else:
+            fname_validator = FileNameValidator(
+                min_len=DEFAULT_MIN_LEN,
+                max_len=max_len,
+                fs_encoding=fs_encoding,
+                check_reserved=True,
+                additional_reserved_names=additional_reserved_names,
+                platform=platform,
+            )
+
+        super().__init__(
+            max_len=max_len,
+            fs_encoding=fs_encoding,
+            null_value_handler=null_value_handler,
+            reserved_name_handler=reserved_name_handler,
+            additional_reserved_names=additional_reserved_names,
+            platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
+            platform=platform,
+            validate_after_sanitize=validate_after_sanitize,
+            validator=fname_validator,
+        )
+
+        self._sanitize_regexp = self._get_sanitize_regexp()
+
+    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
+        try:
+            validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
+        except ValidationError as e:
+            if e.reason == ErrorReason.NULL_NAME:
+                if isinstance(value, PurePath):
+                    raise
+
+                return self._null_value_handler(e)
+            raise
+
+        sanitized_filename = self._sanitize_regexp.sub(replacement_text, str(value))
+        sanitized_filename = sanitized_filename[: self.max_len]
+
+        try:
+            self._validator.validate(sanitized_filename)
+        except ValidationError as e:
+            if e.reason == ErrorReason.RESERVED_NAME:
+                replacement_word = self._reserved_name_handler(e)
+                if e.reserved_name != replacement_word:
+                    sanitized_filename = re.sub(
+                        re.escape(e.reserved_name), replacement_word, sanitized_filename
+                    )
+            elif e.reason == ErrorReason.INVALID_CHARACTER and self._is_windows(
+                include_universal=True
+            ):
+                # Do not start a file or directory name with a space
+                sanitized_filename = sanitized_filename.lstrip(" ")
+
+                # Do not end a file or directory name with a space or a period
+                sanitized_filename = sanitized_filename.rstrip(" ")
+                if sanitized_filename not in (".", ".."):
+                    sanitized_filename = sanitized_filename.rstrip(" .")
+            elif e.reason == ErrorReason.NULL_NAME:
+                sanitized_filename = self._null_value_handler(e)
+
+        if self._validate_after_sanitize:
+            try:
+                self._validator.validate(sanitized_filename)
+            except ValidationError as e:
+                raise ValidationError(
+                    description=str(e),
+                    reason=ErrorReason.INVALID_AFTER_SANITIZE,
+                    platform=self.platform,
+                )
+
+        if isinstance(value, PurePath):
+            return Path(sanitized_filename)
+
+        return sanitized_filename
+
+    def _get_sanitize_regexp(self) -> Pattern[str]:
+        if self._is_windows(include_universal=True):
+            return _RE_INVALID_WIN_FILENAME
+
+        return _RE_INVALID_FILENAME
+
+
+class FileNameValidator(BaseValidator):
+    _WINDOWS_RESERVED_FILE_NAMES = ("CON", "PRN", "AUX", "CLOCK$", "NUL") + tuple(
+        f"{name:s}{num:d}" for name, num in itertools.product(("COM", "LPT"), range(1, 10))
+    )
+    _MACOS_RESERVED_FILE_NAMES = (":",)
+
+    @property
+    def reserved_keywords(self) -> Tuple[str, ...]:
+        common_keywords = super().reserved_keywords
+
+        if self._is_universal():
+            word_set = set(
+                common_keywords
+                + self._WINDOWS_RESERVED_FILE_NAMES
+                + self._MACOS_RESERVED_FILE_NAMES
+            )
+        elif self._is_windows():
+            word_set = set(common_keywords + self._WINDOWS_RESERVED_FILE_NAMES)
+        elif self._is_posix() or self._is_macos():
+            word_set = set(common_keywords + self._MACOS_RESERVED_FILE_NAMES)
+        else:
+            word_set = set(common_keywords)
+
+        return tuple(sorted(word_set))
+
+    def __init__(
+        self,
+        min_len: int = DEFAULT_MIN_LEN,
+        max_len: int = _DEFAULT_MAX_FILENAME_LEN,
+        fs_encoding: Optional[str] = None,
+        platform: Optional[PlatformType] = None,
+        check_reserved: bool = True,
+        additional_reserved_names: Optional[Sequence[str]] = None,
+    ) -> None:
+        super().__init__(
+            min_len=min_len,
+            max_len=max_len,
+            fs_encoding=fs_encoding,
+            check_reserved=check_reserved,
+            additional_reserved_names=additional_reserved_names,
+            platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
+            platform=platform,
+        )
+
+    def validate(self, value: PathType) -> None:
+        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
+
+        unicode_filename = to_str(value)
+        byte_ct = len(unicode_filename.encode(self._fs_encoding))
+
+        self.validate_abspath(unicode_filename)
+
+        err_kwargs = {
+            ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
+            ErrorAttrKey.PLATFORM: self.platform,
+            ErrorAttrKey.FS_ENCODING: self._fs_encoding,
+            ErrorAttrKey.BYTE_COUNT: byte_ct,
+        }
+        if byte_ct > self.max_len:
+            raise ValidationError(
+                [
+                    f"filename is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
+                ],
+                **err_kwargs,
+            )
+        if byte_ct < self.min_len:
+            raise ValidationError(
+                [
+                    f"filename is too short: expected>={self.min_len:d} bytes, actual={byte_ct:d} bytes"
+                ],
+                **err_kwargs,
+            )
+
+        self._validate_reserved_keywords(unicode_filename)
+        self.__validate_universal_filename(unicode_filename)
+
+        if self._is_windows(include_universal=True):
+            self.__validate_win_filename(unicode_filename)
+
+    def validate_abspath(self, value: str) -> None:
+        err = ValidationError(
+            description=f"found an absolute path ({value}), expected a filename",
+            platform=self.platform,
+            reason=ErrorReason.FOUND_ABS_PATH,
+        )
+
+        if self._is_windows(include_universal=True):
+            if ntpath.isabs(value):
+                raise err
+
+        if posixpath.isabs(value):
+            raise err
+
+    def __validate_universal_filename(self, unicode_filename: str) -> None:
+        match = _RE_INVALID_FILENAME.findall(unicode_filename)
+        if match:
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=findall_to_str(match), value=repr(unicode_filename)
+                ),
+                platform=Platform.UNIVERSAL,
+            )
+
+    def __validate_win_filename(self, unicode_filename: str) -> None:
+        match = _RE_INVALID_WIN_FILENAME.findall(unicode_filename)
+        if match:
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=findall_to_str(match), value=repr(unicode_filename)
+                ),
+                platform=Platform.WINDOWS,
+            )
+
+        if unicode_filename in (".", ".."):
+            return
+
+        KB2829981_err_tmpl = "{}. Refer: https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/file-folder-name-whitespace-characters"  # noqa: E501
+
+        if unicode_filename[-1] in (" ", "."):
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=re.escape(unicode_filename[-1]), value=repr(unicode_filename)
+                ),
+                platform=Platform.WINDOWS,
+                description=KB2829981_err_tmpl.format(
+                    "Do not end a file or directory name with a space or a period"
+                ),
+            )
+
+        if unicode_filename[0] in (" "):
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=re.escape(unicode_filename[0]), value=repr(unicode_filename)
+                ),
+                platform=Platform.WINDOWS,
+                description=KB2829981_err_tmpl.format(
+                    "Do not start a file or directory name with a space"
+                ),
+            )
+
+
+def validate_filename(
+    filename: PathType,
+    platform: Optional[PlatformType] = None,
+    min_len: int = DEFAULT_MIN_LEN,
+    max_len: int = _DEFAULT_MAX_FILENAME_LEN,
+    fs_encoding: Optional[str] = None,
+    check_reserved: bool = True,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+) -> None:
+    """Verifying whether the ``filename`` is a valid file name or not.
+
+    Args:
+        filename:
+            Filename to validate.
+        platform:
+            Target platform name of the filename.
+
+            .. include:: platform.txt
+        min_len:
+            Minimum byte length of the ``filename``. The value must be greater or equal to one.
+            Defaults to ``1``.
+        max_len:
+            Maximum byte length of the ``filename``. The value must be lower than:
+
+            - ``Linux``: 4096
+            - ``macOS``: 1024
+            - ``Windows``: 260
+            - ``universal``: 260
+
+            Defaults to ``255``.
+        fs_encoding:
+            Filesystem encoding that used to calculate the byte length of the filename.
+            If |None|, get the value from the execution environment.
+        check_reserved:
+            If |True|, check reserved names of the ``platform``.
+        additional_reserved_names:
+            Additional reserved names to check.
+            Case insensitive.
+
+    Raises:
+        ValidationError (ErrorReason.INVALID_LENGTH):
+            If the ``filename`` is longer than ``max_len`` characters.
+        ValidationError (ErrorReason.INVALID_CHARACTER):
+            If the ``filename`` includes invalid character(s) for a filename:
+            |invalid_filename_chars|.
+            The following characters are also invalid for Windows platforms:
+            |invalid_win_filename_chars|.
+        ValidationError (ErrorReason.RESERVED_NAME):
+            If the ``filename`` equals reserved name by OS.
+            Windows reserved name is as follows:
+            ``"CON"``, ``"PRN"``, ``"AUX"``, ``"NUL"``, ``"COM[1-9]"``, ``"LPT[1-9]"``.
+
+    Example:
+        :ref:`example-validate-filename`
+
+    See Also:
+        `Naming Files, Paths, and Namespaces - Win32 apps | Microsoft Docs
+        <https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file>`__
+    """
+
+    FileNameValidator(
+        platform=platform,
+        min_len=min_len,
+        max_len=max_len,
+        fs_encoding=fs_encoding,
+        check_reserved=check_reserved,
+        additional_reserved_names=additional_reserved_names,
+    ).validate(filename)
+
+
+def is_valid_filename(
+    filename: PathType,
+    platform: Optional[PlatformType] = None,
+    min_len: int = DEFAULT_MIN_LEN,
+    max_len: Optional[int] = None,
+    fs_encoding: Optional[str] = None,
+    check_reserved: bool = True,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+) -> bool:
+    """Check whether the ``filename`` is a valid name or not.
+
+    Args:
+        filename:
+            A filename to be checked.
+        platform:
+            Target platform name of the filename.
+
+    Example:
+        :ref:`example-is-valid-filename`
+
+    See Also:
+        :py:func:`.validate_filename()`
+    """
+
+    return FileNameValidator(
+        platform=platform,
+        min_len=min_len,
+        max_len=-1 if max_len is None else max_len,
+        fs_encoding=fs_encoding,
+        check_reserved=check_reserved,
+        additional_reserved_names=additional_reserved_names,
+    ).is_valid(filename)
+
+
+def sanitize_filename(
+    filename: PathType,
+    replacement_text: str = "",
+    platform: Optional[PlatformType] = None,
+    max_len: Optional[int] = _DEFAULT_MAX_FILENAME_LEN,
+    fs_encoding: Optional[str] = None,
+    check_reserved: Optional[bool] = None,
+    null_value_handler: Optional[ValidationErrorHandler] = None,
+    reserved_name_handler: Optional[ValidationErrorHandler] = None,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+    validate_after_sanitize: bool = False,
+) -> PathType:
+    """Make a valid filename from a string.
+
+    To make a valid filename, the function does the following:
+
+    - Replace invalid characters as file names included in the ``filename``
+      with the ``replacement_text``. Invalid characters are:
+
+        - unprintable characters
+        - |invalid_filename_chars|
+        - for Windows (or universal) only: |invalid_win_filename_chars|
+
+    - Replace a value if a sanitized value is a reserved name by operating systems
+      with a specified handler by ``reserved_name_handler``.
+
+    Args:
+        filename: Filename to sanitize.
+        replacement_text:
+            Replacement text for invalid characters. Defaults to ``""``.
+        platform:
+            Target platform name of the filename.
+
+            .. include:: platform.txt
+        max_len:
+            Maximum byte length of the ``filename``.
+            Truncate the name length if the ``filename`` length exceeds this value.
+            Defaults to ``255``.
+        fs_encoding:
+            Filesystem encoding that used to calculate the byte length of the filename.
+            If |None|, get the value from the execution environment.
+        check_reserved:
+            [Deprecated] Use 'reserved_name_handler' instead.
+        null_value_handler:
+            Function called when a value after sanitization is an empty string.
+            You can specify predefined handlers:
+
+            - :py:func:`~.handler.NullValueHandler.return_null_string`
+            - :py:func:`~.handler.NullValueHandler.return_timestamp`
+            - :py:func:`~.handler.raise_error`
+
+            Defaults to :py:func:`.handler.NullValueHandler.return_null_string` that just return ``""``.
+        reserved_name_handler:
+            Function called when a value after sanitization is a reserved name.
+            You can specify predefined handlers:
+
+            - :py:meth:`~.handler.ReservedNameHandler.add_leading_underscore`
+            - :py:meth:`~.handler.ReservedNameHandler.add_trailing_underscore`
+            - :py:meth:`~.handler.ReservedNameHandler.as_is`
+            - :py:func:`~.handler.raise_error`
+
+            Defaults to :py:func:`.handler.add_trailing_underscore`.
+        additional_reserved_names:
+            Additional reserved names to sanitize.
+            Case insensitive.
+        validate_after_sanitize:
+            Execute validation after sanitization to the file name.
+
+    Returns:
+        Same type as the ``filename`` (str or PathLike object):
+            Sanitized filename.
+
+    Raises:
+        ValueError:
+            If the ``filename`` is an invalid filename.
+
+    Example:
+        :ref:`example-sanitize-filename`
+    """
+
+    if check_reserved is not None:
+        warnings.warn(
+            "'check_reserved' is deprecated. Use 'reserved_name_handler' instead.",
+            DeprecationWarning,
+        )
+
+        if check_reserved is False:
+            reserved_name_handler = ReservedNameHandler.as_is
+
+    return FileNameSanitizer(
+        platform=platform,
+        max_len=-1 if max_len is None else max_len,
+        fs_encoding=fs_encoding,
+        null_value_handler=null_value_handler,
+        reserved_name_handler=reserved_name_handler,
+        additional_reserved_names=additional_reserved_names,
+        validate_after_sanitize=validate_after_sanitize,
+    ).sanitize(filename, replacement_text)
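For context, validate_filename, is_valid_filename, and sanitize_filename defined above are the public filename API that upstream pathvalidate re-exports from the package root. A short usage sketch, assuming that layout; the outputs shown in comments are illustrative and may vary with the installed version:

    # Illustrative sketch only: typical use of the filename API added above.
    from pathvalidate import ValidationError, is_valid_filename, sanitize_filename, validate_filename

    print(sanitize_filename("fi:l*e/p\"a?t>h|.t<xt"))     # 'filepath.txt'
    print(is_valid_filename("COM1", platform="windows"))  # False (Windows reserved name)

    try:
        validate_filename("fi:l*e.txt")
    except ValidationError as e:
        print(e.reason)                                   # e.g. ErrorReason.INVALID_CHARACTER
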
env-llmeval/lib/python3.10/site-packages/pathvalidate/_filepath.py ADDED
@@ -0,0 +1,516 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
+"""
+
+import ntpath
+import os.path
+import posixpath
+import re
+import warnings
+from pathlib import Path, PurePath
+from typing import List, Optional, Pattern, Sequence, Tuple
+
+from ._base import AbstractSanitizer, AbstractValidator, BaseFile, BaseValidator
+from ._common import findall_to_str, to_str, validate_pathtype
+from ._const import _NTFS_RESERVED_FILE_NAMES, DEFAULT_MIN_LEN, INVALID_CHAR_ERR_MSG_TMPL, Platform
+from ._filename import FileNameSanitizer, FileNameValidator
+from ._types import PathType, PlatformType
+from .error import ErrorAttrKey, ErrorReason, InvalidCharError, ReservedNameError, ValidationError
+from .handler import ReservedNameHandler, ValidationErrorHandler
+
+
+_RE_INVALID_PATH = re.compile(f"[{re.escape(BaseFile._INVALID_PATH_CHARS):s}]", re.UNICODE)
+_RE_INVALID_WIN_PATH = re.compile(f"[{re.escape(BaseFile._INVALID_WIN_PATH_CHARS):s}]", re.UNICODE)
+
+
+class FilePathSanitizer(AbstractSanitizer):
+    def __init__(
+        self,
+        max_len: int = -1,
+        fs_encoding: Optional[str] = None,
+        platform: Optional[PlatformType] = None,
+        null_value_handler: Optional[ValidationErrorHandler] = None,
+        reserved_name_handler: Optional[ValidationErrorHandler] = None,
+        additional_reserved_names: Optional[Sequence[str]] = None,
+        normalize: bool = True,
+        validate_after_sanitize: bool = False,
+        validator: Optional[AbstractValidator] = None,
+    ) -> None:
+        if validator:
+            fpath_validator = validator
+        else:
+            fpath_validator = FilePathValidator(
+                min_len=DEFAULT_MIN_LEN,
+                max_len=max_len,
+                fs_encoding=fs_encoding,
+                check_reserved=True,
+                additional_reserved_names=additional_reserved_names,
+                platform=platform,
+            )
+        super().__init__(
+            max_len=max_len,
+            fs_encoding=fs_encoding,
+            validator=fpath_validator,
+            null_value_handler=null_value_handler,
+            reserved_name_handler=reserved_name_handler,
+            additional_reserved_names=additional_reserved_names,
+            platform=platform,
+            validate_after_sanitize=validate_after_sanitize,
+        )
+
+        self._sanitize_regexp = self._get_sanitize_regexp()
+        self.__fname_sanitizer = FileNameSanitizer(
+            max_len=self.max_len,
+            fs_encoding=fs_encoding,
+            null_value_handler=null_value_handler,
+            reserved_name_handler=reserved_name_handler,
+            additional_reserved_names=additional_reserved_names,
+            platform=self.platform,
+            validate_after_sanitize=validate_after_sanitize,
+        )
+        self.__normalize = normalize
+
+        if self._is_windows(include_universal=True):
+            self.__split_drive = ntpath.splitdrive
+        else:
+            self.__split_drive = posixpath.splitdrive
+
+    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
+        try:
+            validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
+        except ValidationError as e:
+            if e.reason == ErrorReason.NULL_NAME:
+                if isinstance(value, PurePath):
+                    raise
+
+                return self._null_value_handler(e)
+            raise
+
+        unicode_filepath = to_str(value)
+
+        drive, unicode_filepath = self.__split_drive(unicode_filepath)
+        unicode_filepath = self._sanitize_regexp.sub(replacement_text, unicode_filepath)
+        if self.__normalize and unicode_filepath:
+            unicode_filepath = os.path.normpath(unicode_filepath)
+        sanitized_path = unicode_filepath
+
+        sanitized_entries: List[str] = []
+        if drive:
+            sanitized_entries.append(drive)
+        for entry in sanitized_path.replace("\\", "/").split("/"):
+            if entry in _NTFS_RESERVED_FILE_NAMES:
+                sanitized_entries.append(f"{entry}_")
+                continue
+
+            sanitized_entry = str(
+                self.__fname_sanitizer.sanitize(entry, replacement_text=replacement_text)
+            )
+            if not sanitized_entry:
+                if not sanitized_entries:
+                    sanitized_entries.append("")
+                continue
+
+            sanitized_entries.append(sanitized_entry)
+
+        sanitized_path = self.__get_path_separator().join(sanitized_entries)
+        try:
+            self._validator.validate(sanitized_path)
+        except ValidationError as e:
+            if e.reason == ErrorReason.NULL_NAME:
+                sanitized_path = self._null_value_handler(e)
+
+        if self._validate_after_sanitize:
+            self._validator.validate(sanitized_path)
+
+        if isinstance(value, PurePath):
+            return Path(sanitized_path)
+
+        return sanitized_path
+
+    def _get_sanitize_regexp(self) -> Pattern[str]:
+        if self._is_windows(include_universal=True):
+            return _RE_INVALID_WIN_PATH
+
+        return _RE_INVALID_PATH
+
+    def __get_path_separator(self) -> str:
+        if self._is_windows():
+            return "\\"
+
+        return "/"
+
+
+class FilePathValidator(BaseValidator):
+    _RE_NTFS_RESERVED = re.compile(
+        "|".join(f"^/{re.escape(pattern)}$" for pattern in _NTFS_RESERVED_FILE_NAMES),
+        re.IGNORECASE,
+    )
+    _MACOS_RESERVED_FILE_PATHS = ("/", ":")
+
+    @property
+    def reserved_keywords(self) -> Tuple[str, ...]:
+        common_keywords = super().reserved_keywords
+
+        if any([self._is_universal(), self._is_posix(), self._is_macos()]):
+            return common_keywords + self._MACOS_RESERVED_FILE_PATHS
+
+        if self._is_linux():
+            return common_keywords + ("/",)
+
+        return common_keywords
+
+    def __init__(
+        self,
+        min_len: int = DEFAULT_MIN_LEN,
+        max_len: int = -1,
+        fs_encoding: Optional[str] = None,
+        platform: Optional[PlatformType] = None,
+        check_reserved: bool = True,
+        additional_reserved_names: Optional[Sequence[str]] = None,
+    ) -> None:
+        super().__init__(
+            min_len=min_len,
+            max_len=max_len,
+            fs_encoding=fs_encoding,
+            check_reserved=check_reserved,
+            additional_reserved_names=additional_reserved_names,
+            platform=platform,
+        )
+
+        self.__fname_validator = FileNameValidator(
+            min_len=min_len,
+            max_len=max_len,
+            check_reserved=check_reserved,
+            additional_reserved_names=additional_reserved_names,
+            platform=platform,
+        )
+
+        if self._is_windows(include_universal=True):
+            self.__split_drive = ntpath.splitdrive
+        else:
+            self.__split_drive = posixpath.splitdrive
+
+    def validate(self, value: PathType) -> None:
+        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
+        self.validate_abspath(value)
+
+        _drive, tail = self.__split_drive(value)
+        if not tail:
+            return
+
+        unicode_filepath = to_str(tail)
+        byte_ct = len(unicode_filepath.encode(self._fs_encoding))
+        err_kwargs = {
+            ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
+            ErrorAttrKey.PLATFORM: self.platform,
+            ErrorAttrKey.FS_ENCODING: self._fs_encoding,
+            ErrorAttrKey.BYTE_COUNT: byte_ct,
+        }
+
+        if byte_ct > self.max_len:
+            raise ValidationError(
+                [
+                    f"file path is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
+                ],
+                **err_kwargs,
+            )
+        if byte_ct < self.min_len:
+            raise ValidationError(
+                [
+                    "file path is too short: expected>={:d} bytes, actual={:d} bytes".format(
+                        self.min_len, byte_ct
+                    )
+                ],
+                **err_kwargs,
+            )
+
+        self._validate_reserved_keywords(unicode_filepath)
+        unicode_filepath = unicode_filepath.replace("\\", "/")
+        for entry in unicode_filepath.split("/"):
+            if not entry or entry in (".", ".."):
+                continue
+
+            self.__fname_validator._validate_reserved_keywords(entry)
+
+        if self._is_windows(include_universal=True):
+            self.__validate_win_filepath(unicode_filepath)
+        else:
+            self.__validate_unix_filepath(unicode_filepath)
+
+    def validate_abspath(self, value: PathType) -> None:
+        is_posix_abs = posixpath.isabs(value)
+        is_nt_abs = ntpath.isabs(value)
+        err_object = ValidationError(
+            description=(
+                "an invalid absolute file path ({}) for the platform ({}).".format(
+                    value, self.platform.value
+                )
+                + " to avoid the error, specify an appropriate platform corresponding to"
+                + " the path format or 'auto'."
+            ),
+            platform=self.platform,
+            reason=ErrorReason.MALFORMED_ABS_PATH,
+        )
+
+        if any([self._is_windows() and is_nt_abs, self._is_linux() and is_posix_abs]):
+            return
+
+        if self._is_universal() and any([is_posix_abs, is_nt_abs]):
+            ValidationError(
+                description=(
+                    ("POSIX style" if is_posix_abs else "NT style")
+                    + " absolute file path found. expected a platform-independent file path."
+                ),
+                platform=self.platform,
+                reason=ErrorReason.MALFORMED_ABS_PATH,
+            )
+
+        if self._is_windows(include_universal=True) and is_posix_abs:
+            raise err_object
+
+        drive, _tail = ntpath.splitdrive(value)
+        if not self._is_windows() and drive and is_nt_abs:
+            raise err_object
+
+    def __validate_unix_filepath(self, unicode_filepath: str) -> None:
+        match = _RE_INVALID_PATH.findall(unicode_filepath)
+        if match:
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=findall_to_str(match), value=repr(unicode_filepath)
+                )
+            )
+
+    def __validate_win_filepath(self, unicode_filepath: str) -> None:
+        match = _RE_INVALID_WIN_PATH.findall(unicode_filepath)
+        if match:
+            raise InvalidCharError(
+                INVALID_CHAR_ERR_MSG_TMPL.format(
+                    invalid=findall_to_str(match), value=repr(unicode_filepath)
+                ),
+                platform=Platform.WINDOWS,
+            )
+
+        _drive, value = self.__split_drive(unicode_filepath)
+        if value:
+            match_reserved = self._RE_NTFS_RESERVED.search(value)
+            if match_reserved:
+                reserved_name = match_reserved.group()
+                raise ReservedNameError(
+                    f"'{reserved_name}' is a reserved name",
+                    reusable_name=False,
+                    reserved_name=reserved_name,
+                    platform=self.platform,
+                )
+
+
+def validate_filepath(
+    file_path: PathType,
+    platform: Optional[PlatformType] = None,
+    min_len: int = DEFAULT_MIN_LEN,
+    max_len: Optional[int] = None,
+    fs_encoding: Optional[str] = None,
+    check_reserved: bool = True,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+) -> None:
+    """Verifying whether the ``file_path`` is a valid file path or not.
+
+    Args:
+        file_path (PathType):
+            File path to be validated.
+        platform (Optional[PlatformType], optional):
+            Target platform name of the file path.
+
+            .. include:: platform.txt
+        min_len (int, optional):
+            Minimum byte length of the ``file_path``. The value must be greater or equal to one.
+            Defaults to ``1``.
+        max_len (Optional[int], optional):
+            Maximum byte length of the ``file_path``. If the value is |None| or minus,
+            automatically determined by the ``platform``:
+
+            - ``Linux``: 4096
+            - ``macOS``: 1024
+            - ``Windows``: 260
+            - ``universal``: 260
+        fs_encoding (Optional[str], optional):
+            Filesystem encoding that used to calculate the byte length of the file path.
+            If |None|, get the value from the execution environment.
+        check_reserved (bool, optional):
+            If |True|, check reserved names of the ``platform``.
+            Defaults to |True|.
+        additional_reserved_names (Optional[Sequence[str]], optional):
+            Additional reserved names to check.
+
+    Raises:
+        ValidationError (ErrorReason.INVALID_CHARACTER):
+            If the ``file_path`` includes invalid char(s):
+            |invalid_file_path_chars|.
+            The following characters are also invalid for Windows platforms:
+            |invalid_win_file_path_chars|
+        ValidationError (ErrorReason.INVALID_LENGTH):
+            If the ``file_path`` is longer than ``max_len`` characters.
+        ValidationError:
+            If ``file_path`` include invalid values.
+
+    Example:
+        :ref:`example-validate-file-path`
+
+    See Also:
+        `Naming Files, Paths, and Namespaces - Win32 apps | Microsoft Docs
+        <https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file>`__
+    """
+
+    FilePathValidator(
+        platform=platform,
+        min_len=min_len,
+        max_len=-1 if max_len is None else max_len,
+        fs_encoding=fs_encoding,
+        check_reserved=check_reserved,
+        additional_reserved_names=additional_reserved_names,
+    ).validate(file_path)
+
+
+def is_valid_filepath(
+    file_path: PathType,
+    platform: Optional[PlatformType] = None,
+    min_len: int = DEFAULT_MIN_LEN,
+    max_len: Optional[int] = None,
+    fs_encoding: Optional[str] = None,
+    check_reserved: bool = True,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+) -> bool:
+    """Check whether the ``file_path`` is a valid name or not.
+
+    Args:
+        file_path:
+            A filepath to be checked.
+        platform:
+            Target platform name of the file path.
+
+    Example:
+        :ref:`example-is-valid-filepath`
+
+    See Also:
+        :py:func:`.validate_filepath()`
+    """
+
+    return FilePathValidator(
+        platform=platform,
+        min_len=min_len,
+        max_len=-1 if max_len is None else max_len,
+        fs_encoding=fs_encoding,
+        check_reserved=check_reserved,
+        additional_reserved_names=additional_reserved_names,
+    ).is_valid(file_path)
+
+
+def sanitize_filepath(
+    file_path: PathType,
+    replacement_text: str = "",
+    platform: Optional[PlatformType] = None,
+    max_len: Optional[int] = None,
+    fs_encoding: Optional[str] = None,
+    check_reserved: Optional[bool] = None,
+    null_value_handler: Optional[ValidationErrorHandler] = None,
+    reserved_name_handler: Optional[ValidationErrorHandler] = None,
+    additional_reserved_names: Optional[Sequence[str]] = None,
+    normalize: bool = True,
+    validate_after_sanitize: bool = False,
+) -> PathType:
+    """Make a valid file path from a string.
+
+    To make a valid file path, the function does the following:
+
+    - Replace invalid characters for a file path within the ``file_path``
+      with the ``replacement_text``. Invalid characters are as follows:
+
+        - unprintable characters
+        - |invalid_file_path_chars|
+        - for Windows (or universal) only: |invalid_win_file_path_chars|
+
+    - Replace a value if a sanitized value is a reserved name by operating systems
+      with a specified handler by ``reserved_name_handler``.
+
+    Args:
+        file_path:
+            File path to sanitize.
+        replacement_text:
+            Replacement text for invalid characters.
+            Defaults to ``""``.
+        platform:
+            Target platform name of the file path.
+
+            .. include:: platform.txt
+        max_len:
+            Maximum byte length of the file path.
+            Truncate the path if the value length exceeds the `max_len`.
+            If the value is |None| or minus, ``max_len`` will automatically determined by the ``platform``:
+
+            - ``Linux``: 4096
+            - ``macOS``: 1024
+            - ``Windows``: 260
+            - ``universal``: 260
+        fs_encoding:
+            Filesystem encoding that used to calculate the byte length of the file path.
+            If |None|, get the value from the execution environment.
+        check_reserved:
+            [Deprecated] Use 'reserved_name_handler' instead.
+        null_value_handler:
+            Function called when a value after sanitization is an empty string.
+            You can specify predefined handlers:
+
+            - :py:func:`.handler.NullValueHandler.return_null_string`
+            - :py:func:`.handler.NullValueHandler.return_timestamp`
+            - :py:func:`.handler.raise_error`
+
+            Defaults to :py:func:`.handler.NullValueHandler.return_null_string` that just return ``""``.
+        reserved_name_handler:
+            Function called when a value after sanitization is one of the reserved names.
+            You can specify predefined handlers:
+
+            - :py:meth:`~.handler.ReservedNameHandler.add_leading_underscore`
+            - :py:meth:`~.handler.ReservedNameHandler.add_trailing_underscore`
+            - :py:meth:`~.handler.ReservedNameHandler.as_is`
+            - :py:func:`~.handler.raise_error`
+
+            Defaults to :py:func:`.handler.add_trailing_underscore`.
+        additional_reserved_names:
+            Additional reserved names to sanitize.
+            Case insensitive.
+        normalize:
+            If |True|, normalize the the file path.
+        validate_after_sanitize:
+            Execute validation after sanitization to the file path.
+
+    Returns:
+        Same type as the argument (str or PathLike object):
+            Sanitized filepath.
+
+    Raises:
+        ValueError:
+            If the ``file_path`` is an invalid file path.
+
+    Example:
+        :ref:`example-sanitize-file-path`
+    """
+
+    if check_reserved is not None:
+        warnings.warn(
+            "'check_reserved' is deprecated. Use 'reserved_name_handler' instead.",
+            DeprecationWarning,
+        )
+
+        if check_reserved is False:
+            reserved_name_handler = ReservedNameHandler.as_is
+
+    return FilePathSanitizer(
+        platform=platform,
+        max_len=-1 if max_len is None else max_len,
+        fs_encoding=fs_encoding,
+        normalize=normalize,
+        null_value_handler=null_value_handler,
+        reserved_name_handler=reserved_name_handler,
+        additional_reserved_names=additional_reserved_names,
+        validate_after_sanitize=validate_after_sanitize,
+    ).sanitize(file_path, replacement_text)
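Similarly, validate_filepath, is_valid_filepath, and sanitize_filepath above form the path-level API exposed at the package root in upstream pathvalidate. A short usage sketch under the same assumptions; the values and commented outputs are illustrative only:

    # Illustrative sketch only: typical use of the file-path API added above.
    from pathvalidate import is_valid_filepath, sanitize_filepath

    print(sanitize_filepath("fi:l*e/p\"a?t>h|.t<xt"))         # 'file/path.txt'
    print(is_valid_filepath("abc/def.txt"))                   # True (default 'universal' platform)
    print(is_valid_filepath("fi:l*e", platform="windows"))    # False (invalid characters)
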
env-llmeval/lib/python3.10/site-packages/pathvalidate/_ltsv.py ADDED
@@ -0,0 +1,43 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
+"""
+
+import re
+
+from ._common import to_str, validate_pathtype
+from .error import InvalidCharError
+
+
+__RE_INVALID_LTSV_LABEL = re.compile("[^0-9A-Za-z_.-]", re.UNICODE)
+
+
+def validate_ltsv_label(label: str) -> None:
+    """
+    Verifying whether ``label`` is a valid
+    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
+
+    :param label: Label to validate.
+    :raises pathvalidate.ValidationError:
+        If invalid character(s) found in the ``label`` for a LTSV format label.
+    """
+
+    validate_pathtype(label, allow_whitespaces=False)
+
+    match_list = __RE_INVALID_LTSV_LABEL.findall(to_str(label))
+    if match_list:
+        raise InvalidCharError(f"invalid character found for a LTSV format label: {match_list}")
+
+
+def sanitize_ltsv_label(label: str, replacement_text: str = "") -> str:
+    """
+    Replace all of the symbols in text.
+
+    :param label: Input text.
+    :param replacement_text: Replacement text.
+    :return: A replacement string.
+    :rtype: str
+    """
+
+    validate_pathtype(label, allow_whitespaces=False)
+
+    return __RE_INVALID_LTSV_LABEL.sub(replacement_text, to_str(label))
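The LTSV helpers above are small standalone utilities. A minimal sketch that imports them directly from the module added in this diff; the label strings are illustrative only:

    # Illustrative sketch only: the LTSV label helpers defined in _ltsv.py above.
    from pathvalidate._ltsv import sanitize_ltsv_label, validate_ltsv_label

    print(sanitize_ltsv_label("label-with:invalid chars"))  # 'label-withinvalidchars'
    validate_ltsv_label("valid_label.01")                   # passes silently; invalid labels raise
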
env-llmeval/lib/python3.10/site-packages/pkg_resources/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (101 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc ADDED
Binary file (20.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-310.pyc ADDED
Binary file (199 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py ADDED
@@ -0,0 +1,608 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ # Copyright (c) 2005-2010 ActiveState Software Inc.
4
+ # Copyright (c) 2013 Eddy Petrișor
5
+
6
+ """Utilities for determining application-specific dirs.
7
+
8
+ See <http://github.com/ActiveState/appdirs> for details and usage.
9
+ """
10
+ # Dev Notes:
11
+ # - MSDN on where to store app data files:
12
+ # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
13
+ # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
14
+ # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
15
+
16
+ __version_info__ = (1, 4, 3)
17
+ __version__ = '.'.join(map(str, __version_info__))
18
+
19
+
20
+ import sys
21
+ import os
22
+
23
+ PY3 = sys.version_info[0] == 3
24
+
25
+ if PY3:
26
+ unicode = str
27
+
28
+ if sys.platform.startswith('java'):
29
+ import platform
30
+ os_name = platform.java_ver()[3][0]
31
+ if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
32
+ system = 'win32'
33
+ elif os_name.startswith('Mac'): # "Mac OS X", etc.
34
+ system = 'darwin'
35
+ else: # "Linux", "SunOS", "FreeBSD", etc.
36
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
37
+ # are actually checked for and the rest of the module expects
38
+ # *sys.platform* style strings.
39
+ system = 'linux2'
40
+ else:
41
+ system = sys.platform
42
+
43
+
44
+
45
+ def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
46
+ r"""Return full path to the user-specific data dir for this application.
47
+
48
+ "appname" is the name of application.
49
+ If None, just the system directory is returned.
50
+ "appauthor" (only used on Windows) is the name of the
51
+ appauthor or distributing body for this application. Typically
52
+ it is the owning company name. This falls back to appname. You may
53
+ pass False to disable it.
54
+ "version" is an optional version path element to append to the
55
+ path. You might want to use this if you want multiple versions
56
+ of your app to be able to run independently. If used, this
57
+ would typically be "<major>.<minor>".
58
+ Only applied when appname is present.
59
+ "roaming" (boolean, default False) can be set True to use the Windows
60
+ roaming appdata directory. That means that for users on a Windows
61
+ network setup for roaming profiles, this user data will be
62
+ sync'd on login. See
63
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
64
+ for a discussion of issues.
65
+
66
+ Typical user data directories are:
67
+ Mac OS X: ~/Library/Application Support/<AppName>
68
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
69
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
70
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
71
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
72
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
73
+
74
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
75
+ That means, by default "~/.local/share/<AppName>".
76
+ """
77
+ if system == "win32":
78
+ if appauthor is None:
79
+ appauthor = appname
80
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
81
+ path = os.path.normpath(_get_win_folder(const))
82
+ if appname:
83
+ if appauthor is not False:
84
+ path = os.path.join(path, appauthor, appname)
85
+ else:
86
+ path = os.path.join(path, appname)
87
+ elif system == 'darwin':
88
+ path = os.path.expanduser('~/Library/Application Support/')
89
+ if appname:
90
+ path = os.path.join(path, appname)
91
+ else:
92
+ path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
93
+ if appname:
94
+ path = os.path.join(path, appname)
95
+ if appname and version:
96
+ path = os.path.join(path, version)
97
+ return path
98
+
99
+
100
+ def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
101
+ r"""Return full path to the user-shared data dir for this application.
102
+
103
+ "appname" is the name of application.
104
+ If None, just the system directory is returned.
105
+ "appauthor" (only used on Windows) is the name of the
106
+ appauthor or distributing body for this application. Typically
107
+ it is the owning company name. This falls back to appname. You may
108
+ pass False to disable it.
109
+ "version" is an optional version path element to append to the
110
+ path. You might want to use this if you want multiple versions
111
+ of your app to be able to run independently. If used, this
112
+ would typically be "<major>.<minor>".
113
+ Only applied when appname is present.
114
+ "multipath" is an optional parameter only applicable to *nix
115
+ which indicates that the entire list of data dirs should be
116
+ returned. By default, the first item from XDG_DATA_DIRS is
117
+ returned, or '/usr/local/share/<AppName>',
118
+ if XDG_DATA_DIRS is not set
119
+
120
+ Typical site data directories are:
121
+ Mac OS X: /Library/Application Support/<AppName>
122
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
123
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
124
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
125
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
126
+
127
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
128
+
129
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
130
+ """
131
+ if system == "win32":
132
+ if appauthor is None:
133
+ appauthor = appname
134
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
135
+ if appname:
136
+ if appauthor is not False:
137
+ path = os.path.join(path, appauthor, appname)
138
+ else:
139
+ path = os.path.join(path, appname)
140
+ elif system == 'darwin':
141
+ path = os.path.expanduser('/Library/Application Support')
142
+ if appname:
143
+ path = os.path.join(path, appname)
144
+ else:
145
+ # XDG default for $XDG_DATA_DIRS
146
+ # only first, if multipath is False
147
+ path = os.getenv('XDG_DATA_DIRS',
148
+ os.pathsep.join(['/usr/local/share', '/usr/share']))
149
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
150
+ if appname:
151
+ if version:
152
+ appname = os.path.join(appname, version)
153
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
154
+
155
+ if multipath:
156
+ path = os.pathsep.join(pathlist)
157
+ else:
158
+ path = pathlist[0]
159
+ return path
160
+
161
+ if appname and version:
162
+ path = os.path.join(path, version)
163
+ return path
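+
+ # A minimal sketch of the multipath behaviour described above, assuming
+ # XDG_DATA_DIRS is unset; "MyApp" is a placeholder name.
+ #
+ #   >>> site_data_dir("MyApp")                    # Linux, first entry only
+ #   '/usr/local/share/MyApp'
+ #   >>> site_data_dir("MyApp", multipath=True)    # Linux, all entries
+ #   '/usr/local/share/MyApp:/usr/share/MyApp'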
164
+
165
+
166
+ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
167
+ r"""Return full path to the user-specific config dir for this application.
168
+
169
+ "appname" is the name of application.
170
+ If None, just the system directory is returned.
171
+ "appauthor" (only used on Windows) is the name of the
172
+ appauthor or distributing body for this application. Typically
173
+ it is the owning company name. This falls back to appname. You may
174
+ pass False to disable it.
175
+ "version" is an optional version path element to append to the
176
+ path. You might want to use this if you want multiple versions
177
+ of your app to be able to run independently. If used, this
178
+ would typically be "<major>.<minor>".
179
+ Only applied when appname is present.
180
+ "roaming" (boolean, default False) can be set True to use the Windows
181
+ roaming appdata directory. That means that for users on a Windows
182
+ network setup for roaming profiles, this user data will be
183
+ sync'd on login. See
184
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
185
+ for a discussion of issues.
186
+
187
+ Typical user config directories are:
188
+ Mac OS X: same as user_data_dir
189
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
190
+ Win *: same as user_data_dir
191
+
192
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
193
+ That means, by default "~/.config/<AppName>".
194
+ """
195
+ if system in ["win32", "darwin"]:
196
+ path = user_data_dir(appname, appauthor, None, roaming)
197
+ else:
198
+ path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
199
+ if appname:
200
+ path = os.path.join(path, appname)
201
+ if appname and version:
202
+ path = os.path.join(path, version)
203
+ return path
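+
+ # A minimal sketch, assuming no XDG_CONFIG_HOME override; "MyApp" is a
+ # placeholder name.
+ #
+ #   >>> user_config_dir("MyApp")                  # Linux
+ #   '/home/<user>/.config/MyApp'
+ #   >>> user_config_dir("MyApp", version="1.0")   # Linux
+ #   '/home/<user>/.config/MyApp/1.0'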
204
+
205
+
206
+ def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
207
+ r"""Return full path to the user-shared data dir for this application.
208
+
209
+ "appname" is the name of application.
210
+ If None, just the system directory is returned.
211
+ "appauthor" (only used on Windows) is the name of the
212
+ appauthor or distributing body for this application. Typically
213
+ it is the owning company name. This falls back to appname. You may
214
+ pass False to disable it.
215
+ "version" is an optional version path element to append to the
216
+ path. You might want to use this if you want multiple versions
217
+ of your app to be able to run independently. If used, this
218
+ would typically be "<major>.<minor>".
219
+ Only applied when appname is present.
220
+ "multipath" is an optional parameter only applicable to *nix
221
+ which indicates that the entire list of config dirs should be
222
+ returned. By default, the first item from XDG_CONFIG_DIRS is
223
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
224
+
225
+ Typical site config directories are:
226
+ Mac OS X: same as site_data_dir
227
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
228
+ $XDG_CONFIG_DIRS
229
+ Win *: same as site_data_dir
230
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
231
+
232
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
233
+
234
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
235
+ """
236
+ if system in ["win32", "darwin"]:
237
+ path = site_data_dir(appname, appauthor)
238
+ if appname and version:
239
+ path = os.path.join(path, version)
240
+ else:
241
+ # XDG default for $XDG_CONFIG_DIRS
242
+ # only first, if multipath is False
243
+ path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
244
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
245
+ if appname:
246
+ if version:
247
+ appname = os.path.join(appname, version)
248
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
249
+
250
+ if multipath:
251
+ path = os.pathsep.join(pathlist)
252
+ else:
253
+ path = pathlist[0]
254
+ return path
255
+
256
+
257
+ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
258
+ r"""Return full path to the user-specific cache dir for this application.
259
+
260
+ "appname" is the name of application.
261
+ If None, just the system directory is returned.
262
+ "appauthor" (only used on Windows) is the name of the
263
+ appauthor or distributing body for this application. Typically
264
+ it is the owning company name. This falls back to appname. You may
265
+ pass False to disable it.
266
+ "version" is an optional version path element to append to the
267
+ path. You might want to use this if you want multiple versions
268
+ of your app to be able to run independently. If used, this
269
+ would typically be "<major>.<minor>".
270
+ Only applied when appname is present.
271
+ "opinion" (boolean) can be False to disable the appending of
272
+ "Cache" to the base app data dir for Windows. See
273
+ discussion below.
274
+
275
+ Typical user cache directories are:
276
+ Mac OS X: ~/Library/Caches/<AppName>
277
+ Unix: ~/.cache/<AppName> (XDG default)
278
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
279
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
280
+
281
+ On Windows the only suggestion in the MSDN docs is that local settings go in
282
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
283
+ app data dir (the default returned by `user_data_dir` above). Apps typically
284
+ put cache data somewhere *under* the given dir here. Some examples:
285
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
286
+ ...\Acme\SuperApp\Cache\1.0
287
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
288
+ This can be disabled with the `opinion=False` option.
289
+ """
290
+ if system == "win32":
291
+ if appauthor is None:
292
+ appauthor = appname
293
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
294
+ if appname:
295
+ if appauthor is not False:
296
+ path = os.path.join(path, appauthor, appname)
297
+ else:
298
+ path = os.path.join(path, appname)
299
+ if opinion:
300
+ path = os.path.join(path, "Cache")
301
+ elif system == 'darwin':
302
+ path = os.path.expanduser('~/Library/Caches')
303
+ if appname:
304
+ path = os.path.join(path, appname)
305
+ else:
306
+ path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
307
+ if appname:
308
+ path = os.path.join(path, appname)
309
+ if appname and version:
310
+ path = os.path.join(path, version)
311
+ return path
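+
+ # A minimal sketch of the "opinion" switch described above; "MyApp" and
+ # "Acme" are placeholder names and the paths assume a default Windows setup.
+ #
+ #   >>> user_cache_dir("MyApp", "Acme")                  # Windows
+ #   'C:\\Users\\<user>\\AppData\\Local\\Acme\\MyApp\\Cache'
+ #   >>> user_cache_dir("MyApp", "Acme", opinion=False)   # Windows
+ #   'C:\\Users\\<user>\\AppData\\Local\\Acme\\MyApp'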
312
+
313
+
314
+ def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
315
+ r"""Return full path to the user-specific state dir for this application.
316
+
317
+ "appname" is the name of application.
318
+ If None, just the system directory is returned.
319
+ "appauthor" (only used on Windows) is the name of the
320
+ appauthor or distributing body for this application. Typically
321
+ it is the owning company name. This falls back to appname. You may
322
+ pass False to disable it.
323
+ "version" is an optional version path element to append to the
324
+ path. You might want to use this if you want multiple versions
325
+ of your app to be able to run independently. If used, this
326
+ would typically be "<major>.<minor>".
327
+ Only applied when appname is present.
328
+ "roaming" (boolean, default False) can be set True to use the Windows
329
+ roaming appdata directory. That means that for users on a Windows
330
+ network setup for roaming profiles, this user data will be
331
+ sync'd on login. See
332
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
333
+ for a discussion of issues.
334
+
335
+ Typical user state directories are:
336
+ Mac OS X: same as user_data_dir
337
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
338
+ Win *: same as user_data_dir
339
+
340
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
341
+ to extend the XDG spec and support $XDG_STATE_HOME.
342
+
343
+ That means, by default "~/.local/state/<AppName>".
344
+ """
345
+ if system in ["win32", "darwin"]:
346
+ path = user_data_dir(appname, appauthor, None, roaming)
347
+ else:
348
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
349
+ if appname:
350
+ path = os.path.join(path, appname)
351
+ if appname and version:
352
+ path = os.path.join(path, version)
353
+ return path
354
+
355
+
356
+ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
357
+ r"""Return full path to the user-specific log dir for this application.
358
+
359
+ "appname" is the name of application.
360
+ If None, just the system directory is returned.
361
+ "appauthor" (only used on Windows) is the name of the
362
+ appauthor or distributing body for this application. Typically
363
+ it is the owning company name. This falls back to appname. You may
364
+ pass False to disable it.
365
+ "version" is an optional version path element to append to the
366
+ path. You might want to use this if you want multiple versions
367
+ of your app to be able to run independently. If used, this
368
+ would typically be "<major>.<minor>".
369
+ Only applied when appname is present.
370
+ "opinion" (boolean) can be False to disable the appending of
371
+ "Logs" to the base app data dir for Windows, and "log" to the
372
+ base cache dir for Unix. See discussion below.
373
+
374
+ Typical user log directories are:
375
+ Mac OS X: ~/Library/Logs/<AppName>
376
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
377
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
378
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
379
+
380
+ On Windows the only suggestion in the MSDN docs is that local settings
381
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
382
+ examples of what some windows apps use for a logs dir.)
383
+
384
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
385
+ value for Windows and appends "log" to the user cache dir for Unix.
386
+ This can be disabled with the `opinion=False` option.
387
+ """
388
+ if system == "darwin":
389
+ path = os.path.expanduser('~/Library/Logs')
390
+ if appname:
391
+ path = os.path.join(path, appname)
392
+ elif system == "win32":
393
+ path = user_data_dir(appname, appauthor, version)
394
+ version = False
395
+ if opinion:
396
+ path = os.path.join(path, "Logs")
397
+ else:
398
+ path = user_cache_dir(appname, appauthor, version)
399
+ version = False
400
+ if opinion:
401
+ path = os.path.join(path, "log")
402
+ if appname and version:
403
+ path = os.path.join(path, version)
404
+ return path
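+
+ # A minimal sketch of the "opinion" switch on Unix, assuming no
+ # XDG_CACHE_HOME override; "MyApp" is a placeholder name.
+ #
+ #   >>> user_log_dir("MyApp")                  # Linux
+ #   '/home/<user>/.cache/MyApp/log'
+ #   >>> user_log_dir("MyApp", opinion=False)   # Linux
+ #   '/home/<user>/.cache/MyApp'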
405
+
406
+
407
+ class AppDirs(object):
408
+ """Convenience wrapper for getting application dirs."""
409
+ def __init__(self, appname=None, appauthor=None, version=None,
410
+ roaming=False, multipath=False):
411
+ self.appname = appname
412
+ self.appauthor = appauthor
413
+ self.version = version
414
+ self.roaming = roaming
415
+ self.multipath = multipath
416
+
417
+ @property
418
+ def user_data_dir(self):
419
+ return user_data_dir(self.appname, self.appauthor,
420
+ version=self.version, roaming=self.roaming)
421
+
422
+ @property
423
+ def site_data_dir(self):
424
+ return site_data_dir(self.appname, self.appauthor,
425
+ version=self.version, multipath=self.multipath)
426
+
427
+ @property
428
+ def user_config_dir(self):
429
+ return user_config_dir(self.appname, self.appauthor,
430
+ version=self.version, roaming=self.roaming)
431
+
432
+ @property
433
+ def site_config_dir(self):
434
+ return site_config_dir(self.appname, self.appauthor,
435
+ version=self.version, multipath=self.multipath)
436
+
437
+ @property
438
+ def user_cache_dir(self):
439
+ return user_cache_dir(self.appname, self.appauthor,
440
+ version=self.version)
441
+
442
+ @property
443
+ def user_state_dir(self):
444
+ return user_state_dir(self.appname, self.appauthor,
445
+ version=self.version)
446
+
447
+ @property
448
+ def user_log_dir(self):
449
+ return user_log_dir(self.appname, self.appauthor,
450
+ version=self.version)
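+
+ # A minimal sketch of the wrapper above; "SuperApp" and "Acme" are
+ # placeholder names, and each property simply forwards to the module-level
+ # function of the same name.
+ #
+ #   >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
+ #   >>> dirs.user_data_dir    # e.g. '/home/<user>/.local/share/SuperApp/1.0' on Linux
+ #   >>> dirs.user_cache_dir   # e.g. '/home/<user>/.cache/SuperApp/1.0' on Linux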
451
+
452
+
453
+ #---- internal support stuff
454
+
455
+ def _get_win_folder_from_registry(csidl_name):
456
+ """This is a fallback technique at best. I'm not sure if using the
457
+ registry for this guarantees us the correct answer for all CSIDL_*
458
+ names.
459
+ """
460
+ if PY3:
461
+ import winreg as _winreg
462
+ else:
463
+ import _winreg
464
+
465
+ shell_folder_name = {
466
+ "CSIDL_APPDATA": "AppData",
467
+ "CSIDL_COMMON_APPDATA": "Common AppData",
468
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
469
+ }[csidl_name]
470
+
471
+ key = _winreg.OpenKey(
472
+ _winreg.HKEY_CURRENT_USER,
473
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
474
+ )
475
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
476
+ return dir
477
+
478
+
479
+ def _get_win_folder_with_pywin32(csidl_name):
480
+ from win32com.shell import shellcon, shell
481
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
482
+ # Try to make this a unicode path because SHGetFolderPath does
483
+ # not return unicode strings when there is unicode data in the
484
+ # path.
485
+ try:
486
+ dir = unicode(dir)
487
+
488
+ # Downgrade to short path name if have highbit chars. See
489
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
490
+ has_high_char = False
491
+ for c in dir:
492
+ if ord(c) > 255:
493
+ has_high_char = True
494
+ break
495
+ if has_high_char:
496
+ try:
497
+ import win32api
498
+ dir = win32api.GetShortPathName(dir)
499
+ except ImportError:
500
+ pass
501
+ except UnicodeError:
502
+ pass
503
+ return dir
504
+
505
+
506
+ def _get_win_folder_with_ctypes(csidl_name):
507
+ import ctypes
508
+
509
+ csidl_const = {
510
+ "CSIDL_APPDATA": 26,
511
+ "CSIDL_COMMON_APPDATA": 35,
512
+ "CSIDL_LOCAL_APPDATA": 28,
513
+ }[csidl_name]
514
+
515
+ buf = ctypes.create_unicode_buffer(1024)
516
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
517
+
518
+ # Downgrade to short path name if have highbit chars. See
519
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
520
+ has_high_char = False
521
+ for c in buf:
522
+ if ord(c) > 255:
523
+ has_high_char = True
524
+ break
525
+ if has_high_char:
526
+ buf2 = ctypes.create_unicode_buffer(1024)
527
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
528
+ buf = buf2
529
+
530
+ return buf.value
531
+
532
+ def _get_win_folder_with_jna(csidl_name):
533
+ import array
534
+ from com.sun import jna
535
+ from com.sun.jna.platform import win32
536
+
537
+ buf_size = win32.WinDef.MAX_PATH * 2
538
+ buf = array.zeros('c', buf_size)
539
+ shell = win32.Shell32.INSTANCE
540
+ shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
541
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
542
+
543
+ # Downgrade to short path name if have highbit chars. See
544
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
545
+ has_high_char = False
546
+ for c in dir:
547
+ if ord(c) > 255:
548
+ has_high_char = True
549
+ break
550
+ if has_high_char:
551
+ buf = array.zeros('c', buf_size)
552
+ kernel = win32.Kernel32.INSTANCE
553
+ if kernel.GetShortPathName(dir, buf, buf_size):
554
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
555
+
556
+ return dir
557
+
558
+ if system == "win32":
559
+ try:
560
+ import win32com.shell
561
+ _get_win_folder = _get_win_folder_with_pywin32
562
+ except ImportError:
563
+ try:
564
+ from ctypes import windll
565
+ _get_win_folder = _get_win_folder_with_ctypes
566
+ except ImportError:
567
+ try:
568
+ import com.sun.jna
569
+ _get_win_folder = _get_win_folder_with_jna
570
+ except ImportError:
571
+ _get_win_folder = _get_win_folder_from_registry
572
+
573
+
574
+ #---- self test code
575
+
576
+ if __name__ == "__main__":
577
+ appname = "MyApp"
578
+ appauthor = "MyCompany"
579
+
580
+ props = ("user_data_dir",
581
+ "user_config_dir",
582
+ "user_cache_dir",
583
+ "user_state_dir",
584
+ "user_log_dir",
585
+ "site_data_dir",
586
+ "site_config_dir")
587
+
588
+ print("-- app dirs %s --" % __version__)
589
+
590
+ print("-- app dirs (with optional 'version')")
591
+ dirs = AppDirs(appname, appauthor, version="1.0")
592
+ for prop in props:
593
+ print("%s: %s" % (prop, getattr(dirs, prop)))
594
+
595
+ print("\n-- app dirs (without optional 'version')")
596
+ dirs = AppDirs(appname, appauthor)
597
+ for prop in props:
598
+ print("%s: %s" % (prop, getattr(dirs, prop)))
599
+
600
+ print("\n-- app dirs (without optional 'appauthor')")
601
+ dirs = AppDirs(appname)
602
+ for prop in props:
603
+ print("%s: %s" % (prop, getattr(dirs, prop)))
604
+
605
+ print("\n-- app dirs (with disabled 'appauthor')")
606
+ dirs = AppDirs(appname, appauthor=False)
607
+ for prop in props:
608
+ print("%s: %s" % (prop, getattr(dirs, prop)))
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc ADDED
Binary file (593 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (449 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc ADDED
Binary file (7.3 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc ADDED
Binary file (4.62 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc ADDED
Binary file (9.3 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.58 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py ADDED
@@ -0,0 +1,136 @@
1
+ """PEP 656 support.
2
+
3
+ This module implements logic to detect if the currently running Python is
4
+ linked against musl, and what musl version is used.
5
+ """
6
+
7
+ import contextlib
8
+ import functools
9
+ import operator
10
+ import os
11
+ import re
12
+ import struct
13
+ import subprocess
14
+ import sys
15
+ from typing import IO, Iterator, NamedTuple, Optional, Tuple
16
+
17
+
18
+ def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
19
+ return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
20
+
21
+
22
+ def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
23
+ """Detect musl libc location by parsing the Python executable.
24
+
25
+ Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
26
+ ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
27
+ """
28
+ f.seek(0)
29
+ try:
30
+ ident = _read_unpacked(f, "16B")
31
+ except struct.error:
32
+ return None
33
+ if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
34
+ return None
35
+ f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
36
+
37
+ try:
38
+ # e_fmt: Format for program header.
39
+ # p_fmt: Format for section header.
40
+ # p_idx: Indexes to find p_type, p_offset, and p_filesz.
41
+ e_fmt, p_fmt, p_idx = {
42
+ 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
43
+ 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
44
+ }[ident[4]]
45
+ except KeyError:
46
+ return None
47
+ else:
48
+ p_get = operator.itemgetter(*p_idx)
49
+
50
+ # Find the interpreter section and return its content.
51
+ try:
52
+ _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
53
+ except struct.error:
54
+ return None
55
+ for i in range(e_phnum + 1):
56
+ f.seek(e_phoff + e_phentsize * i)
57
+ try:
58
+ p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
59
+ except struct.error:
60
+ return None
61
+ if p_type != 3: # Not PT_INTERP.
62
+ continue
63
+ f.seek(p_offset)
64
+ interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
65
+ if "musl" not in interpreter:
66
+ return None
67
+ return interpreter
68
+ return None
69
+
70
+
71
+ class _MuslVersion(NamedTuple):
72
+ major: int
73
+ minor: int
74
+
75
+
76
+ def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
77
+ lines = [n for n in (n.strip() for n in output.splitlines()) if n]
78
+ if len(lines) < 2 or lines[0][:4] != "musl":
79
+ return None
80
+ m = re.match(r"Version (\d+)\.(\d+)", lines[1])
81
+ if not m:
82
+ return None
83
+ return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
84
+
85
+
86
+ @functools.lru_cache()
87
+ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
88
+ """Detect currently-running musl runtime version.
89
+
90
+ This is done by checking the specified executable's dynamic linking
91
+ information, and invoking the loader to parse its output for a version
92
+ string. If the loader is musl, the output would be something like::
93
+
94
+ musl libc (x86_64)
95
+ Version 1.2.2
96
+ Dynamic Program Loader
97
+ """
98
+ with contextlib.ExitStack() as stack:
99
+ try:
100
+ f = stack.enter_context(open(executable, "rb"))
101
+ except IOError:
102
+ return None
103
+ ld = _parse_ld_musl_from_elf(f)
104
+ if not ld:
105
+ return None
106
+ proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
107
+ return _parse_musl_version(proc.stderr)
108
+
109
+
110
+ def platform_tags(arch: str) -> Iterator[str]:
111
+ """Generate musllinux tags compatible to the current platform.
112
+
113
+ :param arch: Should be the part of platform tag after the ``linux_``
114
+ prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
115
+ prerequisite for the current platform to be musllinux-compatible.
116
+
117
+ :returns: An iterator of compatible musllinux tags.
118
+ """
119
+ sys_musl = _get_musl_version(sys.executable)
120
+ if sys_musl is None: # Python not dynamically linked against musl.
121
+ return
122
+ for minor in range(sys_musl.minor, -1, -1):
123
+ yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
124
+
125
+
126
+ if __name__ == "__main__": # pragma: no cover
127
+ import sysconfig
128
+
129
+ plat = sysconfig.get_platform()
130
+ assert plat.startswith("linux-"), "not linux"
131
+
132
+ print("plat:", plat)
133
+ print("musl:", _get_musl_version(sys.executable))
134
+ print("tags:", end=" ")
135
+ for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
136
+ print(t, end="\n ")
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py ADDED
@@ -0,0 +1,484 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import logging
6
+ import platform
7
+ import sys
8
+ import sysconfig
9
+ from importlib.machinery import EXTENSION_SUFFIXES
10
+ from typing import (
11
+ Dict,
12
+ FrozenSet,
13
+ Iterable,
14
+ Iterator,
15
+ List,
16
+ Optional,
17
+ Sequence,
18
+ Tuple,
19
+ Union,
20
+ cast,
21
+ )
22
+
23
+ from . import _manylinux, _musllinux
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+ PythonVersion = Sequence[int]
28
+ MacVersion = Tuple[int, int]
29
+
30
+ INTERPRETER_SHORT_NAMES: Dict[str, str] = {
31
+ "python": "py", # Generic.
32
+ "cpython": "cp",
33
+ "pypy": "pp",
34
+ "ironpython": "ip",
35
+ "jython": "jy",
36
+ }
37
+
38
+
39
+ _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
40
+
41
+
42
+ class Tag:
43
+ """
44
+ A representation of the tag triple for a wheel.
45
+
46
+ Instances are considered immutable and thus are hashable. Equality checking
47
+ is also supported.
48
+ """
49
+
50
+ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
51
+
52
+ def __init__(self, interpreter: str, abi: str, platform: str) -> None:
53
+ self._interpreter = interpreter.lower()
54
+ self._abi = abi.lower()
55
+ self._platform = platform.lower()
56
+ # The __hash__ of every single element in a Set[Tag] will be evaluated each time
57
+ # that a set calls its `.isdisjoint()` method, which may be called hundreds of
58
+ # times when scanning a page of links for packages with tags matching that
59
+ # Set[Tag]. Pre-computing the value here produces significant speedups for
60
+ # downstream consumers.
61
+ self._hash = hash((self._interpreter, self._abi, self._platform))
62
+
63
+ @property
64
+ def interpreter(self) -> str:
65
+ return self._interpreter
66
+
67
+ @property
68
+ def abi(self) -> str:
69
+ return self._abi
70
+
71
+ @property
72
+ def platform(self) -> str:
73
+ return self._platform
74
+
75
+ def __eq__(self, other: object) -> bool:
76
+ if not isinstance(other, Tag):
77
+ return NotImplemented
78
+
79
+ return (
80
+ (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
81
+ and (self._platform == other._platform)
82
+ and (self._abi == other._abi)
83
+ and (self._interpreter == other._interpreter)
84
+ )
85
+
86
+ def __hash__(self) -> int:
87
+ return self._hash
88
+
89
+ def __str__(self) -> str:
90
+ return f"{self._interpreter}-{self._abi}-{self._platform}"
91
+
92
+ def __repr__(self) -> str:
93
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
94
+
95
+
96
+ def parse_tag(tag: str) -> FrozenSet[Tag]:
97
+ """
98
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
99
+
100
+ Returning a set is required due to the possibility that the tag is a
101
+ compressed tag set.
102
+ """
103
+ tags = set()
104
+ interpreters, abis, platforms = tag.split("-")
105
+ for interpreter in interpreters.split("."):
106
+ for abi in abis.split("."):
107
+ for platform_ in platforms.split("."):
108
+ tags.add(Tag(interpreter, abi, platform_))
109
+ return frozenset(tags)
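+
+ # A minimal sketch of parsing a compressed tag set, as described above; the
+ # tag string is illustrative.
+ #
+ #   >>> sorted(str(t) for t in parse_tag("py2.py3-none-any"))
+ #   ['py2-none-any', 'py3-none-any']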
110
+
111
+
112
+ def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
113
+ value = sysconfig.get_config_var(name)
114
+ if value is None and warn:
115
+ logger.debug(
116
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
117
+ )
118
+ return value
119
+
120
+
121
+ def _normalize_string(string: str) -> str:
122
+ return string.replace(".", "_").replace("-", "_")
123
+
124
+
125
+ def _abi3_applies(python_version: PythonVersion) -> bool:
126
+ """
127
+ Determine if the Python version supports abi3.
128
+
129
+ PEP 384 was first implemented in Python 3.2.
130
+ """
131
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
132
+
133
+
134
+ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
135
+ py_version = tuple(py_version) # To allow for version comparison.
136
+ abis = []
137
+ version = _version_nodot(py_version[:2])
138
+ debug = pymalloc = ucs4 = ""
139
+ with_debug = _get_config_var("Py_DEBUG", warn)
140
+ has_refcount = hasattr(sys, "gettotalrefcount")
141
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
142
+ # extension modules is the best option.
143
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
144
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
145
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
146
+ debug = "d"
147
+ if py_version < (3, 8):
148
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
149
+ if with_pymalloc or with_pymalloc is None:
150
+ pymalloc = "m"
151
+ if py_version < (3, 3):
152
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
153
+ if unicode_size == 4 or (
154
+ unicode_size is None and sys.maxunicode == 0x10FFFF
155
+ ):
156
+ ucs4 = "u"
157
+ elif debug:
158
+ # Debug builds can also load "normal" extension modules.
159
+ # We can also assume no UCS-4 or pymalloc requirement.
160
+ abis.append(f"cp{version}")
161
+ abis.insert(
162
+ 0,
163
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
164
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
165
+ ),
166
+ )
167
+ return abis
168
+
169
+
170
+ def cpython_tags(
171
+ python_version: Optional[PythonVersion] = None,
172
+ abis: Optional[Iterable[str]] = None,
173
+ platforms: Optional[Iterable[str]] = None,
174
+ *,
175
+ warn: bool = False,
176
+ ) -> Iterator[Tag]:
177
+ """
178
+ Yields the tags for a CPython interpreter.
179
+
180
+ The tags consist of:
181
+ - cp<python_version>-<abi>-<platform>
182
+ - cp<python_version>-abi3-<platform>
183
+ - cp<python_version>-none-<platform>
184
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
185
+
186
+ If python_version only specifies a major version then user-provided ABIs and
187
+ the 'none' ABI tag will be used.
188
+
189
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
190
+ their normal position and not at the beginning.
191
+ """
192
+ if not python_version:
193
+ python_version = sys.version_info[:2]
194
+
195
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
196
+
197
+ if abis is None:
198
+ if len(python_version) > 1:
199
+ abis = _cpython_abis(python_version, warn)
200
+ else:
201
+ abis = []
202
+ abis = list(abis)
203
+ # 'abi3' and 'none' are explicitly handled later.
204
+ for explicit_abi in ("abi3", "none"):
205
+ try:
206
+ abis.remove(explicit_abi)
207
+ except ValueError:
208
+ pass
209
+
210
+ platforms = list(platforms or platform_tags())
211
+ for abi in abis:
212
+ for platform_ in platforms:
213
+ yield Tag(interpreter, abi, platform_)
214
+ if _abi3_applies(python_version):
215
+ yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
216
+ yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
217
+
218
+ if _abi3_applies(python_version):
219
+ for minor_version in range(python_version[1] - 1, 1, -1):
220
+ for platform_ in platforms:
221
+ interpreter = "cp{version}".format(
222
+ version=_version_nodot((python_version[0], minor_version))
223
+ )
224
+ yield Tag(interpreter, "abi3", platform_)
225
+
226
+
227
+ def _generic_abi() -> Iterator[str]:
228
+ abi = sysconfig.get_config_var("SOABI")
229
+ if abi:
230
+ yield _normalize_string(abi)
231
+
232
+
233
+ def generic_tags(
234
+ interpreter: Optional[str] = None,
235
+ abis: Optional[Iterable[str]] = None,
236
+ platforms: Optional[Iterable[str]] = None,
237
+ *,
238
+ warn: bool = False,
239
+ ) -> Iterator[Tag]:
240
+ """
241
+ Yields the tags for a generic interpreter.
242
+
243
+ The tags consist of:
244
+ - <interpreter>-<abi>-<platform>
245
+
246
+ The "none" ABI will be added if it was not explicitly provided.
247
+ """
248
+ if not interpreter:
249
+ interp_name = interpreter_name()
250
+ interp_version = interpreter_version(warn=warn)
251
+ interpreter = "".join([interp_name, interp_version])
252
+ if abis is None:
253
+ abis = _generic_abi()
254
+ platforms = list(platforms or platform_tags())
255
+ abis = list(abis)
256
+ if "none" not in abis:
257
+ abis.append("none")
258
+ for abi in abis:
259
+ for platform_ in platforms:
260
+ yield Tag(interpreter, abi, platform_)
261
+
262
+
263
+ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
264
+ """
265
+ Yields Python versions in descending order.
266
+
267
+ After the latest version, the major-only version will be yielded, and then
268
+ all previous versions of that major version.
269
+ """
270
+ if len(py_version) > 1:
271
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
272
+ yield "py{major}".format(major=py_version[0])
273
+ if len(py_version) > 1:
274
+ for minor in range(py_version[1] - 1, -1, -1):
275
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
276
+
277
+
278
+ def compatible_tags(
279
+ python_version: Optional[PythonVersion] = None,
280
+ interpreter: Optional[str] = None,
281
+ platforms: Optional[Iterable[str]] = None,
282
+ ) -> Iterator[Tag]:
283
+ """
284
+ Yields the sequence of tags that are compatible with a specific version of Python.
285
+
286
+ The tags consist of:
287
+ - py*-none-<platform>
288
+ - <interpreter>-none-any # ... if `interpreter` is provided.
289
+ - py*-none-any
290
+ """
291
+ if not python_version:
292
+ python_version = sys.version_info[:2]
293
+ platforms = list(platforms or platform_tags())
294
+ for version in _py_interpreter_range(python_version):
295
+ for platform_ in platforms:
296
+ yield Tag(version, "none", platform_)
297
+ if interpreter:
298
+ yield Tag(interpreter, "none", "any")
299
+ for version in _py_interpreter_range(python_version):
300
+ yield Tag(version, "none", "any")
301
+
302
+
303
+ def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
304
+ if not is_32bit:
305
+ return arch
306
+
307
+ if arch.startswith("ppc"):
308
+ return "ppc"
309
+
310
+ return "i386"
311
+
312
+
313
+ def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
314
+ formats = [cpu_arch]
315
+ if cpu_arch == "x86_64":
316
+ if version < (10, 4):
317
+ return []
318
+ formats.extend(["intel", "fat64", "fat32"])
319
+
320
+ elif cpu_arch == "i386":
321
+ if version < (10, 4):
322
+ return []
323
+ formats.extend(["intel", "fat32", "fat"])
324
+
325
+ elif cpu_arch == "ppc64":
326
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
327
+ if version > (10, 5) or version < (10, 4):
328
+ return []
329
+ formats.append("fat64")
330
+
331
+ elif cpu_arch == "ppc":
332
+ if version > (10, 6):
333
+ return []
334
+ formats.extend(["fat32", "fat"])
335
+
336
+ if cpu_arch in {"arm64", "x86_64"}:
337
+ formats.append("universal2")
338
+
339
+ if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
340
+ formats.append("universal")
341
+
342
+ return formats
343
+
344
+
345
+ def mac_platforms(
346
+ version: Optional[MacVersion] = None, arch: Optional[str] = None
347
+ ) -> Iterator[str]:
348
+ """
349
+ Yields the platform tags for a macOS system.
350
+
351
+ The `version` parameter is a two-item tuple specifying the macOS version to
352
+ generate platform tags for. The `arch` parameter is the CPU architecture to
353
+ generate platform tags for. Both parameters default to the appropriate value
354
+ for the current system.
355
+ """
356
+ version_str, _, cpu_arch = platform.mac_ver()
357
+ if version is None:
358
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
359
+ else:
360
+ version = version
361
+ if arch is None:
362
+ arch = _mac_arch(cpu_arch)
363
+ else:
364
+ arch = arch
365
+
366
+ if (10, 0) <= version and version < (11, 0):
367
+ # Prior to Mac OS 11, each yearly release of Mac OS bumped the
368
+ # "minor" version number. The major version was always 10.
369
+ for minor_version in range(version[1], -1, -1):
370
+ compat_version = 10, minor_version
371
+ binary_formats = _mac_binary_formats(compat_version, arch)
372
+ for binary_format in binary_formats:
373
+ yield "macosx_{major}_{minor}_{binary_format}".format(
374
+ major=10, minor=minor_version, binary_format=binary_format
375
+ )
376
+
377
+ if version >= (11, 0):
378
+ # Starting with Mac OS 11, each yearly release bumps the major version
379
+ # number. The minor versions are now the midyear updates.
380
+ for major_version in range(version[0], 10, -1):
381
+ compat_version = major_version, 0
382
+ binary_formats = _mac_binary_formats(compat_version, arch)
383
+ for binary_format in binary_formats:
384
+ yield "macosx_{major}_{minor}_{binary_format}".format(
385
+ major=major_version, minor=0, binary_format=binary_format
386
+ )
387
+
388
+ if version >= (11, 0):
389
+ # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
390
+ # Arm64 support was introduced in 11.0, so no Arm binaries from previous
391
+ # releases exist.
392
+ #
393
+ # However, the "universal2" binary format can have a
394
+ # macOS version earlier than 11.0 when the x86_64 part of the binary supports
395
+ # that version of macOS.
396
+ if arch == "x86_64":
397
+ for minor_version in range(16, 3, -1):
398
+ compat_version = 10, minor_version
399
+ binary_formats = _mac_binary_formats(compat_version, arch)
400
+ for binary_format in binary_formats:
401
+ yield "macosx_{major}_{minor}_{binary_format}".format(
402
+ major=compat_version[0],
403
+ minor=compat_version[1],
404
+ binary_format=binary_format,
405
+ )
406
+ else:
407
+ for minor_version in range(16, 3, -1):
408
+ compat_version = 10, minor_version
409
+ binary_format = "universal2"
410
+ yield "macosx_{major}_{minor}_{binary_format}".format(
411
+ major=compat_version[0],
412
+ minor=compat_version[1],
413
+ binary_format=binary_format,
414
+ )
415
+
416
+
417
+ def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
418
+ linux = _normalize_string(sysconfig.get_platform())
419
+ if is_32bit:
420
+ if linux == "linux_x86_64":
421
+ linux = "linux_i686"
422
+ elif linux == "linux_aarch64":
423
+ linux = "linux_armv7l"
424
+ _, arch = linux.split("_", 1)
425
+ yield from _manylinux.platform_tags(linux, arch)
426
+ yield from _musllinux.platform_tags(arch)
427
+ yield linux
428
+
429
+
430
+ def _generic_platforms() -> Iterator[str]:
431
+ yield _normalize_string(sysconfig.get_platform())
432
+
433
+
434
+ def platform_tags() -> Iterator[str]:
435
+ """
436
+ Provides the platform tags for this installation.
437
+ """
438
+ if platform.system() == "Darwin":
439
+ return mac_platforms()
440
+ elif platform.system() == "Linux":
441
+ return _linux_platforms()
442
+ else:
443
+ return _generic_platforms()
444
+
445
+
446
+ def interpreter_name() -> str:
447
+ """
448
+ Returns the name of the running interpreter.
449
+ """
450
+ name = sys.implementation.name
451
+ return INTERPRETER_SHORT_NAMES.get(name) or name
452
+
453
+
454
+ def interpreter_version(*, warn: bool = False) -> str:
455
+ """
456
+ Returns the version of the running interpreter.
457
+ """
458
+ version = _get_config_var("py_version_nodot", warn=warn)
459
+ if version:
460
+ version = str(version)
461
+ else:
462
+ version = _version_nodot(sys.version_info[:2])
463
+ return version
464
+
465
+
466
+ def _version_nodot(version: PythonVersion) -> str:
467
+ return "".join(map(str, version))
468
+
469
+
470
+ def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
471
+ """
472
+ Returns the sequence of tag triples for the running interpreter.
473
+
474
+ The order of the sequence corresponds to priority order for the
475
+ interpreter, from most to least important.
476
+ """
477
+
478
+ interp_name = interpreter_name()
479
+ if interp_name == "cp":
480
+ yield from cpython_tags(warn=warn)
481
+ else:
482
+ yield from generic_tags()
483
+
484
+ yield from compatible_tags()
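+
+ # A minimal sketch of consuming sys_tags(); the value shown is what a
+ # CPython 3.10 manylinux x86_64 build might yield first, purely as an
+ # illustration (the actual output is platform dependent).
+ #
+ #   >>> str(next(iter(sys_tags())))
+ #   'cp310-cp310-manylinux_2_17_x86_64'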
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py ADDED
@@ -0,0 +1,136 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import re
6
+ from typing import FrozenSet, NewType, Tuple, Union, cast
7
+
8
+ from .tags import Tag, parse_tag
9
+ from .version import InvalidVersion, Version
10
+
11
+ BuildTag = Union[Tuple[()], Tuple[int, str]]
12
+ NormalizedName = NewType("NormalizedName", str)
13
+
14
+
15
+ class InvalidWheelFilename(ValueError):
16
+ """
17
+ An invalid wheel filename was found, users should refer to PEP 427.
18
+ """
19
+
20
+
21
+ class InvalidSdistFilename(ValueError):
22
+ """
23
+ An invalid sdist filename was found, users should refer to the packaging user guide.
24
+ """
25
+
26
+
27
+ _canonicalize_regex = re.compile(r"[-_.]+")
28
+ # PEP 427: The build number must start with a digit.
29
+ _build_tag_regex = re.compile(r"(\d+)(.*)")
30
+
31
+
32
+ def canonicalize_name(name: str) -> NormalizedName:
33
+ # This is taken from PEP 503.
34
+ value = _canonicalize_regex.sub("-", name).lower()
35
+ return cast(NormalizedName, value)
36
+
37
+
38
+ def canonicalize_version(version: Union[Version, str]) -> str:
39
+ """
40
+ This is very similar to Version.__str__, but has one subtle difference
41
+ with the way it handles the release segment.
42
+ """
43
+ if isinstance(version, str):
44
+ try:
45
+ parsed = Version(version)
46
+ except InvalidVersion:
47
+ # Legacy versions cannot be normalized
48
+ return version
49
+ else:
50
+ parsed = version
51
+
52
+ parts = []
53
+
54
+ # Epoch
55
+ if parsed.epoch != 0:
56
+ parts.append(f"{parsed.epoch}!")
57
+
58
+ # Release segment
59
+ # NB: This strips trailing '.0's to normalize
60
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
61
+
62
+ # Pre-release
63
+ if parsed.pre is not None:
64
+ parts.append("".join(str(x) for x in parsed.pre))
65
+
66
+ # Post-release
67
+ if parsed.post is not None:
68
+ parts.append(f".post{parsed.post}")
69
+
70
+ # Development release
71
+ if parsed.dev is not None:
72
+ parts.append(f".dev{parsed.dev}")
73
+
74
+ # Local version segment
75
+ if parsed.local is not None:
76
+ parts.append(f"+{parsed.local}")
77
+
78
+ return "".join(parts)
79
+
80
+
81
+ def parse_wheel_filename(
82
+ filename: str,
83
+ ) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
84
+ if not filename.endswith(".whl"):
85
+ raise InvalidWheelFilename(
86
+ f"Invalid wheel filename (extension must be '.whl'): {filename}"
87
+ )
88
+
89
+ filename = filename[:-4]
90
+ dashes = filename.count("-")
91
+ if dashes not in (4, 5):
92
+ raise InvalidWheelFilename(
93
+ f"Invalid wheel filename (wrong number of parts): {filename}"
94
+ )
95
+
96
+ parts = filename.split("-", dashes - 2)
97
+ name_part = parts[0]
98
+ # See PEP 427 for the rules on escaping the project name
99
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
100
+ raise InvalidWheelFilename(f"Invalid project name: {filename}")
101
+ name = canonicalize_name(name_part)
102
+ version = Version(parts[1])
103
+ if dashes == 5:
104
+ build_part = parts[2]
105
+ build_match = _build_tag_regex.match(build_part)
106
+ if build_match is None:
107
+ raise InvalidWheelFilename(
108
+ f"Invalid build number: {build_part} in '{filename}'"
109
+ )
110
+ build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
111
+ else:
112
+ build = ()
113
+ tags = parse_tag(parts[-1])
114
+ return (name, version, build, tags)
115
+
116
+
117
+ def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
118
+ if filename.endswith(".tar.gz"):
119
+ file_stem = filename[: -len(".tar.gz")]
120
+ elif filename.endswith(".zip"):
121
+ file_stem = filename[: -len(".zip")]
122
+ else:
123
+ raise InvalidSdistFilename(
124
+ f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
125
+ f" {filename}"
126
+ )
127
+
128
+ # We are requiring a PEP 440 version, which cannot contain dashes,
129
+ # so we split on the last dash.
130
+ name_part, sep, version_part = file_stem.rpartition("-")
131
+ if not sep:
132
+ raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
133
+
134
+ name = canonicalize_name(name_part)
135
+ version = Version(version_part)
136
+ return (name, version)
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py ADDED
@@ -0,0 +1,504 @@
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import collections
6
+ import itertools
7
+ import re
8
+ import warnings
9
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
10
+
11
+ from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
12
+
13
+ __all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
14
+
15
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
16
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
17
+ SubLocalType = Union[InfiniteTypes, int, str]
18
+ LocalType = Union[
19
+ NegativeInfinityType,
20
+ Tuple[
21
+ Union[
22
+ SubLocalType,
23
+ Tuple[SubLocalType, str],
24
+ Tuple[NegativeInfinityType, SubLocalType],
25
+ ],
26
+ ...,
27
+ ],
28
+ ]
29
+ CmpKey = Tuple[
30
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
31
+ ]
32
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
33
+ VersionComparisonMethod = Callable[
34
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
35
+ ]
36
+
37
+ _Version = collections.namedtuple(
38
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
39
+ )
40
+
41
+
42
+ def parse(version: str) -> Union["LegacyVersion", "Version"]:
43
+ """
44
+ Parse the given version string and return either a :class:`Version` object
45
+ or a :class:`LegacyVersion` object depending on whether the given version is
46
+ a valid PEP 440 version or a legacy version.
47
+ """
48
+ try:
49
+ return Version(version)
50
+ except InvalidVersion:
51
+ return LegacyVersion(version)
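+
+ # A minimal sketch of parse() and PEP 440 ordering; the version strings are
+ # illustrative.
+ #
+ #   >>> parse("1.0.dev1") < parse("1.0a1") < parse("1.0") < parse("1.0.post1")
+ #   True
+ #   >>> isinstance(parse("french toast"), LegacyVersion)   # not PEP 440
+ #   True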
52
+
53
+
54
+ class InvalidVersion(ValueError):
55
+ """
56
+ An invalid version was found, users should refer to PEP 440.
57
+ """
58
+
59
+
60
+ class _BaseVersion:
61
+ _key: Union[CmpKey, LegacyCmpKey]
62
+
63
+ def __hash__(self) -> int:
64
+ return hash(self._key)
65
+
66
+ # Please keep the duplicated `isinstance` check
67
+ # in the six comparisons hereunder
68
+ # unless you find a way to avoid adding overhead function calls.
69
+ def __lt__(self, other: "_BaseVersion") -> bool:
70
+ if not isinstance(other, _BaseVersion):
71
+ return NotImplemented
72
+
73
+ return self._key < other._key
74
+
75
+ def __le__(self, other: "_BaseVersion") -> bool:
76
+ if not isinstance(other, _BaseVersion):
77
+ return NotImplemented
78
+
79
+ return self._key <= other._key
80
+
81
+ def __eq__(self, other: object) -> bool:
82
+ if not isinstance(other, _BaseVersion):
83
+ return NotImplemented
84
+
85
+ return self._key == other._key
86
+
87
+ def __ge__(self, other: "_BaseVersion") -> bool:
88
+ if not isinstance(other, _BaseVersion):
89
+ return NotImplemented
90
+
91
+ return self._key >= other._key
92
+
93
+ def __gt__(self, other: "_BaseVersion") -> bool:
94
+ if not isinstance(other, _BaseVersion):
95
+ return NotImplemented
96
+
97
+ return self._key > other._key
98
+
99
+ def __ne__(self, other: object) -> bool:
100
+ if not isinstance(other, _BaseVersion):
101
+ return NotImplemented
102
+
103
+ return self._key != other._key
104
+
105
+
106
+ class LegacyVersion(_BaseVersion):
107
+ def __init__(self, version: str) -> None:
108
+ self._version = str(version)
109
+ self._key = _legacy_cmpkey(self._version)
110
+
111
+ warnings.warn(
112
+ "Creating a LegacyVersion has been deprecated and will be "
113
+ "removed in the next major release",
114
+ DeprecationWarning,
115
+ )
116
+
117
+ def __str__(self) -> str:
118
+ return self._version
119
+
120
+ def __repr__(self) -> str:
121
+ return f"<LegacyVersion('{self}')>"
122
+
123
+ @property
124
+ def public(self) -> str:
125
+ return self._version
126
+
127
+ @property
128
+ def base_version(self) -> str:
129
+ return self._version
130
+
131
+ @property
132
+ def epoch(self) -> int:
133
+ return -1
134
+
135
+ @property
136
+ def release(self) -> None:
137
+ return None
138
+
139
+ @property
140
+ def pre(self) -> None:
141
+ return None
142
+
143
+ @property
144
+ def post(self) -> None:
145
+ return None
146
+
147
+ @property
148
+ def dev(self) -> None:
149
+ return None
150
+
151
+ @property
152
+ def local(self) -> None:
153
+ return None
154
+
155
+ @property
156
+ def is_prerelease(self) -> bool:
157
+ return False
158
+
159
+ @property
160
+ def is_postrelease(self) -> bool:
161
+ return False
162
+
163
+ @property
164
+ def is_devrelease(self) -> bool:
165
+ return False
166
+
167
+
168
+ _legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
169
+
170
+ _legacy_version_replacement_map = {
171
+ "pre": "c",
172
+ "preview": "c",
173
+ "-": "final-",
174
+ "rc": "c",
175
+ "dev": "@",
176
+ }
177
+
178
+
179
+ def _parse_version_parts(s: str) -> Iterator[str]:
180
+ for part in _legacy_version_component_re.split(s):
181
+ part = _legacy_version_replacement_map.get(part, part)
182
+
183
+ if not part or part == ".":
184
+ continue
185
+
186
+ if part[:1] in "0123456789":
187
+ # pad for numeric comparison
188
+ yield part.zfill(8)
189
+ else:
190
+ yield "*" + part
191
+
192
+ # ensure that alpha/beta/candidate are before final
193
+ yield "*final"
194
+
195
+
196
+ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
197
+
198
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
199
+ # greater than or equal to 0. This will effectively put the LegacyVersion,
200
+ # which uses the defacto standard originally implemented by setuptools,
201
+ # as before all PEP 440 versions.
202
+ epoch = -1
203
+
204
+ # This scheme is taken from setuptools' pkg_resources.parse_version, prior to
206
+ # its adoption of the packaging library.
206
+ parts: List[str] = []
207
+ for part in _parse_version_parts(version.lower()):
208
+ if part.startswith("*"):
209
+ # remove "-" before a prerelease tag
210
+ if part < "*final":
211
+ while parts and parts[-1] == "*final-":
212
+ parts.pop()
213
+
214
+ # remove trailing zeros from each series of numeric parts
215
+ while parts and parts[-1] == "00000000":
216
+ parts.pop()
217
+
218
+ parts.append(part)
219
+
220
+ return epoch, tuple(parts)
221
+
222
+
223
+ # Deliberately not anchored to the start and end of the string, to make it
224
+ # easier for 3rd party code to reuse
225
+ VERSION_PATTERN = r"""
226
+ v?
227
+ (?:
228
+ (?:(?P<epoch>[0-9]+)!)? # epoch
229
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
230
+ (?P<pre> # pre-release
231
+ [-_\.]?
232
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
233
+             [-_\.]?
+             (?P<pre_n>[0-9]+)?
+         )?
+         (?P<post>                                         # post release
+             (?:-(?P<post_n1>[0-9]+))
+             |
+             (?:
+                 [-_\.]?
+                 (?P<post_l>post|rev|r)
+                 [-_\.]?
+                 (?P<post_n2>[0-9]+)?
+             )
+         )?
+         (?P<dev>                                          # dev release
+             [-_\.]?
+             (?P<dev_l>dev)
+             [-_\.]?
+             (?P<dev_n>[0-9]+)?
+         )?
+     )
+     (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+ """
+
+
+ class Version(_BaseVersion):
+
+     _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+     def __init__(self, version: str) -> None:
+
+         # Validate the version and parse it into pieces
+         match = self._regex.search(version)
+         if not match:
+             raise InvalidVersion(f"Invalid version: '{version}'")
+
+         # Store the parsed out pieces of the version
+         self._version = _Version(
+             epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+             release=tuple(int(i) for i in match.group("release").split(".")),
+             pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+             post=_parse_letter_version(
+                 match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+             ),
+             dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+             local=_parse_local_version(match.group("local")),
+         )
+
+         # Generate a key which will be used for sorting
+         self._key = _cmpkey(
+             self._version.epoch,
+             self._version.release,
+             self._version.pre,
+             self._version.post,
+             self._version.dev,
+             self._version.local,
+         )
+
+     def __repr__(self) -> str:
+         return f"<Version('{self}')>"
+
+     def __str__(self) -> str:
+         parts = []
+
+         # Epoch
+         if self.epoch != 0:
+             parts.append(f"{self.epoch}!")
+
+         # Release segment
+         parts.append(".".join(str(x) for x in self.release))
+
+         # Pre-release
+         if self.pre is not None:
+             parts.append("".join(str(x) for x in self.pre))
+
+         # Post-release
+         if self.post is not None:
+             parts.append(f".post{self.post}")
+
+         # Development release
+         if self.dev is not None:
+             parts.append(f".dev{self.dev}")
+
+         # Local version segment
+         if self.local is not None:
+             parts.append(f"+{self.local}")
+
+         return "".join(parts)
+
+     @property
+     def epoch(self) -> int:
+         _epoch: int = self._version.epoch
+         return _epoch
+
+     @property
+     def release(self) -> Tuple[int, ...]:
+         _release: Tuple[int, ...] = self._version.release
+         return _release
+
+     @property
+     def pre(self) -> Optional[Tuple[str, int]]:
+         _pre: Optional[Tuple[str, int]] = self._version.pre
+         return _pre
+
+     @property
+     def post(self) -> Optional[int]:
+         return self._version.post[1] if self._version.post else None
+
+     @property
+     def dev(self) -> Optional[int]:
+         return self._version.dev[1] if self._version.dev else None
+
+     @property
+     def local(self) -> Optional[str]:
+         if self._version.local:
+             return ".".join(str(x) for x in self._version.local)
+         else:
+             return None
+
+     @property
+     def public(self) -> str:
+         return str(self).split("+", 1)[0]
+
+     @property
+     def base_version(self) -> str:
+         parts = []
+
+         # Epoch
+         if self.epoch != 0:
+             parts.append(f"{self.epoch}!")
+
+         # Release segment
+         parts.append(".".join(str(x) for x in self.release))
+
+         return "".join(parts)
+
+     @property
+     def is_prerelease(self) -> bool:
+         return self.dev is not None or self.pre is not None
+
+     @property
+     def is_postrelease(self) -> bool:
+         return self.post is not None
+
+     @property
+     def is_devrelease(self) -> bool:
+         return self.dev is not None
+
+     @property
+     def major(self) -> int:
+         return self.release[0] if len(self.release) >= 1 else 0
+
+     @property
+     def minor(self) -> int:
+         return self.release[1] if len(self.release) >= 2 else 0
+
+     @property
+     def micro(self) -> int:
+         return self.release[2] if len(self.release) >= 3 else 0
+
+
+ def _parse_letter_version(
+     letter: str, number: Union[str, bytes, SupportsInt]
+ ) -> Optional[Tuple[str, int]]:
+
+     if letter:
+         # We consider there to be an implicit 0 in a pre-release if there is
+         # not a numeral associated with it.
+         if number is None:
+             number = 0
+
+         # We normalize any letters to their lower case form
+         letter = letter.lower()
+
+         # We consider some words to be alternate spellings of other words and
+         # in those cases we want to normalize the spellings to our preferred
+         # spelling.
+         if letter == "alpha":
+             letter = "a"
+         elif letter == "beta":
+             letter = "b"
+         elif letter in ["c", "pre", "preview"]:
+             letter = "rc"
+         elif letter in ["rev", "r"]:
+             letter = "post"
+
+         return letter, int(number)
+     if not letter and number:
+         # We assume if we are given a number, but we are not given a letter
+         # then this is using the implicit post release syntax (e.g. 1.0-1)
+         letter = "post"
+
+         return letter, int(number)
+
+     return None
+
+
+ _local_version_separators = re.compile(r"[\._-]")
+
+
+ def _parse_local_version(local: str) -> Optional[LocalType]:
+     """
+     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+     """
+     if local is not None:
+         return tuple(
+             part.lower() if not part.isdigit() else int(part)
+             for part in _local_version_separators.split(local)
+         )
+     return None
+
+
+ def _cmpkey(
+     epoch: int,
+     release: Tuple[int, ...],
+     pre: Optional[Tuple[str, int]],
+     post: Optional[Tuple[str, int]],
+     dev: Optional[Tuple[str, int]],
+     local: Optional[Tuple[SubLocalType]],
+ ) -> CmpKey:
+
+     # When we compare a release version, we want to compare it with all of the
+     # trailing zeros removed. So we'll use a reverse the list, drop all the now
+     # leading zeros until we come to something non zero, then take the rest
+     # re-reverse it back into the correct order and make it a tuple and use
+     # that for our sorting key.
+     _release = tuple(
+         reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+     )
+
+     # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+     # We'll do this by abusing the pre segment, but we _only_ want to do this
+     # if there is not a pre or a post segment. If we have one of those then
+     # the normal sorting rules will handle this case correctly.
+     if pre is None and post is None and dev is not None:
+         _pre: PrePostDevType = NegativeInfinity
+     # Versions without a pre-release (except as noted above) should sort after
+     # those with one.
+     elif pre is None:
+         _pre = Infinity
+     else:
+         _pre = pre
+
+     # Versions without a post segment should sort before those with one.
+     if post is None:
+         _post: PrePostDevType = NegativeInfinity
+
+     else:
+         _post = post
+
+     # Versions without a development segment should sort after those with one.
+     if dev is None:
+         _dev: PrePostDevType = Infinity
+
+     else:
+         _dev = dev
+
+     if local is None:
+         # Versions without a local segment should sort before those with one.
+         _local: LocalType = NegativeInfinity
+     else:
+         # Versions with a local segment need that segment parsed to implement
+         # the sorting rules in PEP440.
+         # - Alpha numeric segments sort before numeric segments
+         # - Alpha numeric segments sort lexicographically
+         # - Numeric segments sort numerically
+         # - Shorter versions sort before longer versions when the prefixes
+         #   match exactly
+         _local = tuple(
+             (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+         )
+
+     return epoch, _release, _pre, _post, _dev, _local
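
For orientation, a brief sketch of how the class and helpers added above are typically used. This is illustrative only; the import path assumes the vendored copy under ``pkg_resources._vendor`` and is not taken from the diff itself.

.. sourcecode:: python

    >>> from pkg_resources._vendor.packaging.version import Version
    >>> v = Version("1.2.3rc1.post2.dev3+ubuntu.1")
    >>> v.release, v.pre, v.post, v.dev, v.local
    ((1, 2, 3), ('rc', 1), 2, 3, 'ubuntu.1')
    >>> v.base_version, v.public
    ('1.2.3', '1.2.3rc1.post2.dev3')
    >>> Version("1.0") > Version("1.0.dev0")   # dev releases sort first
    True
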
env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/extern/__init__.py ADDED
@@ -0,0 +1,73 @@
+ import importlib.util
+ import sys
+
+
+ class VendorImporter:
+     """
+     A PEP 302 meta path importer for finding optionally-vendored
+     or otherwise naturally-installed packages from root_name.
+     """
+
+     def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+         self.root_name = root_name
+         self.vendored_names = set(vendored_names)
+         self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+     @property
+     def search_path(self):
+         """
+         Search first the vendor package then as a natural package.
+         """
+         yield self.vendor_pkg + '.'
+         yield ''
+
+     def _module_matches_namespace(self, fullname):
+         """Figure out if the target module is vendored."""
+         root, base, target = fullname.partition(self.root_name + '.')
+         return not root and any(map(target.startswith, self.vendored_names))
+
+     def load_module(self, fullname):
+         """
+         Iterate over the search path to locate and load fullname.
+         """
+         root, base, target = fullname.partition(self.root_name + '.')
+         for prefix in self.search_path:
+             try:
+                 extant = prefix + target
+                 __import__(extant)
+                 mod = sys.modules[extant]
+                 sys.modules[fullname] = mod
+                 return mod
+             except ImportError:
+                 pass
+         else:
+             raise ImportError(
+                 "The '{target}' package is required; "
+                 "normally this is bundled with this package so if you get "
+                 "this warning, consult the packager of your "
+                 "distribution.".format(**locals())
+             )
+
+     def create_module(self, spec):
+         return self.load_module(spec.name)
+
+     def exec_module(self, module):
+         pass
+
+     def find_spec(self, fullname, path=None, target=None):
+         """Return a module spec for vendored names."""
+         return (
+             importlib.util.spec_from_loader(fullname, self)
+             if self._module_matches_namespace(fullname) else None
+         )
+
+     def install(self):
+         """
+         Install this importer into sys.meta_path if not already present.
+         """
+         if self not in sys.meta_path:
+             sys.meta_path.append(self)
+
+
+ names = 'packaging', 'pyparsing', 'appdirs'
+ VendorImporter(__name__, names).install()
env-llmeval/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.89 kB).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc ADDED
Binary file (314 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py ADDED
@@ -0,0 +1,6 @@
+ import setuptools
+ setuptools.setup(
+     name="my-test-package",
+     version="1.0",
+     zip_safe=True,
+ )
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,208 @@
1
+ This work was derived from the 're' module of CPython 2.6 and CPython 3.1,
2
+ copyright (c) 1998-2001 by Secret Labs AB and licensed under CNRI's Python 1.6
3
+ license.
4
+
5
+ All additions and alterations are licensed under the Apache 2.0 License.
6
+
7
+
8
+ Apache License
9
+ Version 2.0, January 2004
10
+ http://www.apache.org/licenses/
11
+
12
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
13
+
14
+ 1. Definitions.
15
+
16
+ "License" shall mean the terms and conditions for use, reproduction,
17
+ and distribution as defined by Sections 1 through 9 of this document.
18
+
19
+ "Licensor" shall mean the copyright owner or entity authorized by
20
+ the copyright owner that is granting the License.
21
+
22
+ "Legal Entity" shall mean the union of the acting entity and all
23
+ other entities that control, are controlled by, or are under common
24
+ control with that entity. For the purposes of this definition,
25
+ "control" means (i) the power, direct or indirect, to cause the
26
+ direction or management of such entity, whether by contract or
27
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
28
+ outstanding shares, or (iii) beneficial ownership of such entity.
29
+
30
+ "You" (or "Your") shall mean an individual or Legal Entity
31
+ exercising permissions granted by this License.
32
+
33
+ "Source" form shall mean the preferred form for making modifications,
34
+ including but not limited to software source code, documentation
35
+ source, and configuration files.
36
+
37
+ "Object" form shall mean any form resulting from mechanical
38
+ transformation or translation of a Source form, including but
39
+ not limited to compiled object code, generated documentation,
40
+ and conversions to other media types.
41
+
42
+ "Work" shall mean the work of authorship, whether in Source or
43
+ Object form, made available under the License, as indicated by a
44
+ copyright notice that is included in or attached to the work
45
+ (an example is provided in the Appendix below).
46
+
47
+ "Derivative Works" shall mean any work, whether in Source or Object
48
+ form, that is based on (or derived from) the Work and for which the
49
+ editorial revisions, annotations, elaborations, or other modifications
50
+ represent, as a whole, an original work of authorship. For the purposes
51
+ of this License, Derivative Works shall not include works that remain
52
+ separable from, or merely link (or bind by name) to the interfaces of,
53
+ the Work and Derivative Works thereof.
54
+
55
+ "Contribution" shall mean any work of authorship, including
56
+ the original version of the Work and any modifications or additions
57
+ to that Work or Derivative Works thereof, that is intentionally
58
+ submitted to Licensor for inclusion in the Work by the copyright owner
59
+ or by an individual or Legal Entity authorized to submit on behalf of
60
+ the copyright owner. For the purposes of this definition, "submitted"
61
+ means any form of electronic, verbal, or written communication sent
62
+ to the Licensor or its representatives, including but not limited to
63
+ communication on electronic mailing lists, source code control systems,
64
+ and issue tracking systems that are managed by, or on behalf of, the
65
+ Licensor for the purpose of discussing and improving the Work, but
66
+ excluding communication that is conspicuously marked or otherwise
67
+ designated in writing by the copyright owner as "Not a Contribution."
68
+
69
+ "Contributor" shall mean Licensor and any individual or Legal Entity
70
+ on behalf of whom a Contribution has been received by Licensor and
71
+ subsequently incorporated within the Work.
72
+
73
+ 2. Grant of Copyright License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ copyright license to reproduce, prepare Derivative Works of,
77
+ publicly display, publicly perform, sublicense, and distribute the
78
+ Work and such Derivative Works in Source or Object form.
79
+
80
+ 3. Grant of Patent License. Subject to the terms and conditions of
81
+ this License, each Contributor hereby grants to You a perpetual,
82
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
83
+ (except as stated in this section) patent license to make, have made,
84
+ use, offer to sell, sell, import, and otherwise transfer the Work,
85
+ where such license applies only to those patent claims licensable
86
+ by such Contributor that are necessarily infringed by their
87
+ Contribution(s) alone or by combination of their Contribution(s)
88
+ with the Work to which such Contribution(s) was submitted. If You
89
+ institute patent litigation against any entity (including a
90
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
91
+ or a Contribution incorporated within the Work constitutes direct
92
+ or contributory patent infringement, then any patent licenses
93
+ granted to You under this License for that Work shall terminate
94
+ as of the date such litigation is filed.
95
+
96
+ 4. Redistribution. You may reproduce and distribute copies of the
97
+ Work or Derivative Works thereof in any medium, with or without
98
+ modifications, and in Source or Object form, provided that You
99
+ meet the following conditions:
100
+
101
+ (a) You must give any other recipients of the Work or
102
+ Derivative Works a copy of this License; and
103
+
104
+ (b) You must cause any modified files to carry prominent notices
105
+ stating that You changed the files; and
106
+
107
+ (c) You must retain, in the Source form of any Derivative Works
108
+ that You distribute, all copyright, patent, trademark, and
109
+ attribution notices from the Source form of the Work,
110
+ excluding those notices that do not pertain to any part of
111
+ the Derivative Works; and
112
+
113
+ (d) If the Work includes a "NOTICE" text file as part of its
114
+ distribution, then any Derivative Works that You distribute must
115
+ include a readable copy of the attribution notices contained
116
+ within such NOTICE file, excluding those notices that do not
117
+ pertain to any part of the Derivative Works, in at least one
118
+ of the following places: within a NOTICE text file distributed
119
+ as part of the Derivative Works; within the Source form or
120
+ documentation, if provided along with the Derivative Works; or,
121
+ within a display generated by the Derivative Works, if and
122
+ wherever such third-party notices normally appear. The contents
123
+ of the NOTICE file are for informational purposes only and
124
+ do not modify the License. You may add Your own attribution
125
+ notices within Derivative Works that You distribute, alongside
126
+ or as an addendum to the NOTICE text from the Work, provided
127
+ that such additional attribution notices cannot be construed
128
+ as modifying the License.
129
+
130
+ You may add Your own copyright statement to Your modifications and
131
+ may provide additional or different license terms and conditions
132
+ for use, reproduction, or distribution of Your modifications, or
133
+ for any such Derivative Works as a whole, provided Your use,
134
+ reproduction, and distribution of the Work otherwise complies with
135
+ the conditions stated in this License.
136
+
137
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
138
+ any Contribution intentionally submitted for inclusion in the Work
139
+ by You to the Licensor shall be under the terms and conditions of
140
+ this License, without any additional terms or conditions.
141
+ Notwithstanding the above, nothing herein shall supersede or modify
142
+ the terms of any separate license agreement you may have executed
143
+ with Licensor regarding such Contributions.
144
+
145
+ 6. Trademarks. This License does not grant permission to use the trade
146
+ names, trademarks, service marks, or product names of the Licensor,
147
+ except as required for reasonable and customary use in describing the
148
+ origin of the Work and reproducing the content of the NOTICE file.
149
+
150
+ 7. Disclaimer of Warranty. Unless required by applicable law or
151
+ agreed to in writing, Licensor provides the Work (and each
152
+ Contributor provides its Contributions) on an "AS IS" BASIS,
153
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
154
+ implied, including, without limitation, any warranties or conditions
155
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
156
+ PARTICULAR PURPOSE. You are solely responsible for determining the
157
+ appropriateness of using or redistributing the Work and assume any
158
+ risks associated with Your exercise of permissions under this License.
159
+
160
+ 8. Limitation of Liability. In no event and under no legal theory,
161
+ whether in tort (including negligence), contract, or otherwise,
162
+ unless required by applicable law (such as deliberate and grossly
163
+ negligent acts) or agreed to in writing, shall any Contributor be
164
+ liable to You for damages, including any direct, indirect, special,
165
+ incidental, or consequential damages of any character arising as a
166
+ result of this License or out of the use or inability to use the
167
+ Work (including but not limited to damages for loss of goodwill,
168
+ work stoppage, computer failure or malfunction, or any and all
169
+ other commercial damages or losses), even if such Contributor
170
+ has been advised of the possibility of such damages.
171
+
172
+ 9. Accepting Warranty or Additional Liability. While redistributing
173
+ the Work or Derivative Works thereof, You may choose to offer,
174
+ and charge a fee for, acceptance of support, warranty, indemnity,
175
+ or other liability obligations and/or rights consistent with this
176
+ License. However, in accepting such obligations, You may act only
177
+ on Your own behalf and on Your sole responsibility, not on behalf
178
+ of any other Contributor, and only if You agree to indemnify,
179
+ defend, and hold each Contributor harmless for any liability
180
+ incurred by, or claims asserted against, such Contributor by reason
181
+ of your accepting any such warranty or additional liability.
182
+
183
+ END OF TERMS AND CONDITIONS
184
+
185
+ APPENDIX: How to apply the Apache License to your work.
186
+
187
+ To apply the Apache License to your work, attach the following
188
+ boilerplate notice, with the fields enclosed by brackets "[]"
189
+ replaced with your own identifying information. (Don't include
190
+ the brackets!) The text should be enclosed in the appropriate
191
+ comment syntax for the file format. We also recommend that a
192
+ file or class name and description of purpose be included on the
193
+ same "printed page" as the copyright notice for easier
194
+ identification within third-party archives.
195
+
196
+ Copyright 2020 Matthew Barnett
197
+
198
+ Licensed under the Apache License, Version 2.0 (the "License");
199
+ you may not use this file except in compliance with the License.
200
+ You may obtain a copy of the License at
201
+
202
+ http://www.apache.org/licenses/LICENSE-2.0
203
+
204
+ Unless required by applicable law or agreed to in writing, software
205
+ distributed under the License is distributed on an "AS IS" BASIS,
206
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
207
+ See the License for the specific language governing permissions and
208
+ limitations under the License.
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/METADATA ADDED
@@ -0,0 +1,1076 @@
1
+ Metadata-Version: 2.1
2
+ Name: regex
3
+ Version: 2023.12.25
4
+ Summary: Alternative regular expression module, to replace re.
5
+ Home-page: https://github.com/mrabarnett/mrab-regex
6
+ Author: Matthew Barnett
7
+ Author-email: [email protected]
8
+ License: Apache Software License
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: Apache Software License
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python :: 3.7
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
20
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
+ Classifier: Topic :: Text Processing
22
+ Classifier: Topic :: Text Processing :: General
23
+ Requires-Python: >=3.7
24
+ Description-Content-Type: text/x-rst
25
+ License-File: LICENSE.txt
26
+
27
+ Introduction
28
+ ------------
29
+
30
+ This regex implementation is backwards-compatible with the standard 're' module, but offers additional functionality.
31
+
32
+ Note
33
+ ----
34
+
35
+ The re module's behaviour with zero-width matches changed in Python 3.7, and this module follows that behaviour when compiled for Python 3.7.
36
+
37
+ Python 2
38
+ --------
39
+
40
+ Python 2 is no longer supported. The last release that supported Python 2 was 2021.11.10.
41
+
42
+ PyPy
43
+ ----
44
+
45
+ This module is targeted at CPython. It expects that all codepoints are the same width, so it won't behave properly with PyPy outside U+0000..U+007F because PyPy stores strings as UTF-8.
46
+
47
+ Multithreading
48
+ --------------
49
+
50
+ The regex module releases the GIL during matching on instances of the built-in (immutable) string classes, enabling other Python threads to run concurrently. It is also possible to force the regex module to release the GIL during matching by calling the matching methods with the keyword argument ``concurrent=True``. The behaviour is undefined if the string changes during matching, so use it *only* when it is guaranteed that that won't happen.
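
As a minimal illustration of the keyword described above (assuming ``findall`` is among the matching methods that accept it):

.. sourcecode:: python

    >>> import regex
    >>> p = regex.compile(r"\w+")
    >>> # The GIL may be released while this call scans the (immutable) string.
    >>> p.findall("one two three", concurrent=True)
    ['one', 'two', 'three']
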
51
+
52
+ Unicode
53
+ -------
54
+
55
+ This module supports Unicode 15.1.0. Full Unicode case-folding is supported.
56
+
57
+ Flags
58
+ -----
59
+
60
+ There are 2 kinds of flag: scoped and global. Scoped flags can apply to only part of a pattern and can be turned on or off; global flags apply to the entire pattern and can only be turned on.
61
+
62
+ The scoped flags are: ``ASCII (?a)``, ``FULLCASE (?f)``, ``IGNORECASE (?i)``, ``LOCALE (?L)``, ``MULTILINE (?m)``, ``DOTALL (?s)``, ``UNICODE (?u)``, ``VERBOSE (?x)``, ``WORD (?w)``.
63
+
64
+ The global flags are: ``BESTMATCH (?b)``, ``ENHANCEMATCH (?e)``, ``POSIX (?p)``, ``REVERSE (?r)``, ``VERSION0 (?V0)``, ``VERSION1 (?V1)``.
65
+
66
+ If neither the ``ASCII``, ``LOCALE`` nor ``UNICODE`` flag is specified, it will default to ``UNICODE`` if the regex pattern is a Unicode string and ``ASCII`` if it's a bytestring.
67
+
68
+ The ``ENHANCEMATCH`` flag makes fuzzy matching attempt to improve the fit of the next match that it finds.
69
+
70
+ The ``BESTMATCH`` flag makes fuzzy matching search for the best match instead of the next match.
71
+
72
+ Old vs new behaviour
73
+ --------------------
74
+
75
+ In order to be compatible with the re module, this module has 2 behaviours:
76
+
77
+ * **Version 0** behaviour (old behaviour, compatible with the re module):
78
+
79
+ Please note that the re module's behaviour may change over time, and I'll endeavour to match that behaviour in version 0.
80
+
81
+ * Indicated by the ``VERSION0`` flag.
82
+
83
+ * Zero-width matches are not handled correctly in the re module before Python 3.7. The behaviour in those earlier versions is:
84
+
85
+ * ``.split`` won't split a string at a zero-width match.
86
+
87
+ * ``.sub`` will advance by one character after a zero-width match.
88
+
89
+ * Inline flags apply to the entire pattern, and they can't be turned off.
90
+
91
+ * Only simple sets are supported.
92
+
93
+ * Case-insensitive matches in Unicode use simple case-folding by default.
94
+
95
+ * **Version 1** behaviour (new behaviour, possibly different from the re module):
96
+
97
+ * Indicated by the ``VERSION1`` flag.
98
+
99
+ * Zero-width matches are handled correctly.
100
+
101
+ * Inline flags apply to the end of the group or pattern, and they can be turned off.
102
+
103
+ * Nested sets and set operations are supported.
104
+
105
+ * Case-insensitive matches in Unicode use full case-folding by default.
106
+
107
+ If no version is specified, the regex module will default to ``regex.DEFAULT_VERSION``.
108
+
109
+ Case-insensitive matches in Unicode
110
+ -----------------------------------
111
+
112
+ The regex module supports both simple and full case-folding for case-insensitive matches in Unicode. Use of full case-folding can be turned on using the ``FULLCASE`` flag. Please note that this flag affects how the ``IGNORECASE`` flag works; the ``FULLCASE`` flag itself does not turn on case-insensitive matching.
113
+
114
+ Version 0 behaviour: the flag is off by default.
115
+
116
+ Version 1 behaviour: the flag is on by default.
117
+
118
+ Nested sets and set operations
119
+ ------------------------------
120
+
121
+ It's not possible to support both simple sets, as used in the re module, and nested sets at the same time because of a difference in the meaning of an unescaped ``"["`` in a set.
122
+
123
+ For example, the pattern ``[[a-z]--[aeiou]]`` is treated in the version 0 behaviour (simple sets, compatible with the re module) as:
124
+
125
+ * Set containing "[" and the letters "a" to "z"
126
+
127
+ * Literal "--"
128
+
129
+ * Set containing letters "a", "e", "i", "o", "u"
130
+
131
+ * Literal "]"
132
+
133
+ but in the version 1 behaviour (nested sets, enhanced behaviour) as:
134
+
135
+ * Set which is:
136
+
137
+ * Set containing the letters "a" to "z"
138
+
139
+ * but excluding:
140
+
141
+ * Set containing the letters "a", "e", "i", "o", "u"
142
+
143
+ Version 0 behaviour: only simple sets are supported.
144
+
145
+ Version 1 behaviour: nested sets and set operations are supported.
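
A small sketch of the difference described above (outputs are indicative):

.. sourcecode:: python

    >>> import regex
    >>> # Version 1: nested set difference keeps only the consonants.
    >>> regex.findall(r"(?V1)[[a-z]--[aeiou]]", "regex")
    ['r', 'g', 'x']
    >>> # Version 0: the same text is read as a simple set, a literal "--",
    >>> # a second set and a literal "]", so nothing in "regex" matches.
    >>> regex.findall(r"(?V0)[[a-z]--[aeiou]]", "regex")
    []
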
146
+
147
+ Notes on named groups
148
+ ---------------------
149
+
150
+ All groups have a group number, starting from 1.
151
+
152
+ Groups with the same group name will have the same group number, and groups with a different group name will have a different group number.
153
+
154
+ The same name can be used by more than one group, with later captures 'overwriting' earlier captures. All the captures of the group will be available from the ``captures`` method of the match object.
155
+
156
+ Group numbers will be reused across different branches of a branch reset, eg. ``(?|(first)|(second))`` has only group 1. If groups have different group names then they will, of course, have different group numbers, eg. ``(?|(?P<foo>first)|(?P<bar>second))`` has group 1 ("foo") and group 2 ("bar").
157
+
158
+ In the regex ``(\s+)(?|(?P<foo>[A-Z]+)|(\w+) (?P<foo>[0-9]+))`` there are 2 groups:
159
+
160
+ * ``(\s+)`` is group 1.
161
+
162
+ * ``(?P<foo>[A-Z]+)`` is group 2, also called "foo".
163
+
164
+ * ``(\w+)`` is group 2 because of the branch reset.
165
+
166
+ * ``(?P<foo>[0-9]+)`` is group 2 because it's called "foo".
167
+
168
+ If you want to prevent ``(\w+)`` from being group 2, you need to name it (different name, different group number).
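
The numbering rules above can be seen directly (a brief sketch):

.. sourcecode:: python

    >>> import regex
    >>> # Both branches share group 1 because of the branch reset.
    >>> regex.match(r"(?|(first)|(second))", "second").group(1)
    'second'
    >>> # Different names give different group numbers.
    >>> m = regex.match(r"(?|(?P<foo>first)|(?P<bar>second))", "second")
    >>> m.group("bar"), m.group("foo")
    ('second', None)
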
169
+
170
+ Additional features
171
+ -------------------
172
+
173
+ The issue numbers relate to the Python bug tracker, except where listed otherwise.
174
+
175
+ Added ``\p{Horiz_Space}`` and ``\p{Vert_Space}`` (`GitHub issue 477 <https://github.com/mrabarnett/mrab-regex/issues/477#issuecomment-1216779547>`_)
176
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
177
+
178
+ ``\p{Horiz_Space}`` or ``\p{H}`` matches horizontal whitespace and ``\p{Vert_Space}`` or ``\p{V}`` matches vertical whitespace.
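
For example, collapsing runs of horizontal whitespace only (a sketch; spaces and tabs are assumed to fall under ``\p{Horiz_Space}``, while the newline is vertical and is left alone):

.. sourcecode:: python

    >>> import regex
    >>> regex.sub(r"\p{Horiz_Space}+", " ", "a \t  b\nc")
    'a b\nc'
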
179
+
180
+ Added support for lookaround in conditional pattern (`Hg issue 163 <https://github.com/mrabarnett/mrab-regex/issues/163>`_)
181
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
182
+
183
+ The test of a conditional pattern can be a lookaround.
184
+
185
+ .. sourcecode:: python
186
+
187
+ >>> regex.match(r'(?(?=\d)\d+|\w+)', '123abc')
188
+ <regex.Match object; span=(0, 3), match='123'>
189
+ >>> regex.match(r'(?(?=\d)\d+|\w+)', 'abc123')
190
+ <regex.Match object; span=(0, 6), match='abc123'>
191
+
192
+ This is not quite the same as putting a lookaround in the first branch of a pair of alternatives.
193
+
194
+ .. sourcecode:: python
195
+
196
+ >>> print(regex.match(r'(?:(?=\d)\d+\b|\w+)', '123abc'))
197
+ <regex.Match object; span=(0, 6), match='123abc'>
198
+ >>> print(regex.match(r'(?(?=\d)\d+\b|\w+)', '123abc'))
199
+ None
200
+
201
+ In the first example, the lookaround matched, but the remainder of the first branch failed to match, and so the second branch was attempted, whereas in the second example, the lookaround matched, and the first branch failed to match, but the second branch was **not** attempted.
202
+
203
+ Added POSIX matching (leftmost longest) (`Hg issue 150 <https://github.com/mrabarnett/mrab-regex/issues/150>`_)
204
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
205
+
206
+ The POSIX standard for regex is to return the leftmost longest match. This can be turned on using the ``POSIX`` flag.
207
+
208
+ .. sourcecode:: python
209
+
210
+ >>> # Normal matching.
211
+ >>> regex.search(r'Mr|Mrs', 'Mrs')
212
+ <regex.Match object; span=(0, 2), match='Mr'>
213
+ >>> regex.search(r'one(self)?(selfsufficient)?', 'oneselfsufficient')
214
+ <regex.Match object; span=(0, 7), match='oneself'>
215
+ >>> # POSIX matching.
216
+ >>> regex.search(r'(?p)Mr|Mrs', 'Mrs')
217
+ <regex.Match object; span=(0, 3), match='Mrs'>
218
+ >>> regex.search(r'(?p)one(self)?(selfsufficient)?', 'oneselfsufficient')
219
+ <regex.Match object; span=(0, 17), match='oneselfsufficient'>
220
+
221
+ Note that it will take longer to find matches because when it finds a match at a certain position, it won't return that immediately, but will keep looking to see if there's another longer match there.
222
+
223
+ Added ``(?(DEFINE)...)`` (`Hg issue 152 <https://github.com/mrabarnett/mrab-regex/issues/152>`_)
224
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
225
+
226
+ If there's no group called "DEFINE", then ... will be ignored except that any groups defined within it can be called and that the normal rules for numbering groups still apply.
227
+
228
+ .. sourcecode:: python
229
+
230
+ >>> regex.search(r'(?(DEFINE)(?P<quant>\d+)(?P<item>\w+))(?&quant) (?&item)', '5 elephants')
231
+ <regex.Match object; span=(0, 11), match='5 elephants'>
232
+
233
+ Added ``(*PRUNE)``, ``(*SKIP)`` and ``(*FAIL)`` (`Hg issue 153 <https://github.com/mrabarnett/mrab-regex/issues/153>`_)
234
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
235
+
236
+ ``(*PRUNE)`` discards the backtracking info up to that point. When used in an atomic group or a lookaround, it won't affect the enclosing pattern.
237
+
238
+ ``(*SKIP)`` is similar to ``(*PRUNE)``, except that it also sets where in the text the next attempt to match will start. When used in an atomic group or a lookaround, it won't affect the enclosing pattern.
239
+
240
+ ``(*FAIL)`` causes immediate backtracking. ``(*F)`` is a permitted abbreviation.
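
A brief sketch of ``(*FAIL)`` forcing backtracking (the match repr is indicative):

.. sourcecode:: python

    >>> import regex
    >>> # The first alternative always fails at (*F), so only "dog" can match.
    >>> regex.search(r"cat(*F)|dog", "cat or dog")
    <regex.Match object; span=(7, 10), match='dog'>
    >>> print(regex.search(r"cat(*F)", "cat"))
    None
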
241
+
242
+ Added ``\K`` (`Hg issue 151 <https://github.com/mrabarnett/mrab-regex/issues/151>`_)
243
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
244
+
245
+ Keeps the part of the entire match after the position where ``\K`` occurred; the part before it is discarded.
246
+
247
+ It does not affect what groups return.
248
+
249
+ .. sourcecode:: python
250
+
251
+ >>> m = regex.search(r'(\w\w\K\w\w\w)', 'abcdef')
252
+ >>> m[0]
253
+ 'cde'
254
+ >>> m[1]
255
+ 'abcde'
256
+ >>>
257
+ >>> m = regex.search(r'(?r)(\w\w\K\w\w\w)', 'abcdef')
258
+ >>> m[0]
259
+ 'bc'
260
+ >>> m[1]
261
+ 'bcdef'
262
+
263
+ Added capture subscripting for ``expandf`` and ``subf``/``subfn`` (`Hg issue 133 <https://github.com/mrabarnett/mrab-regex/issues/133>`_)
264
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
265
+
266
+ You can use subscripting to get the captures of a repeated group.
267
+
268
+ .. sourcecode:: python
269
+
270
+ >>> m = regex.match(r"(\w)+", "abc")
271
+ >>> m.expandf("{1}")
272
+ 'c'
273
+ >>> m.expandf("{1[0]} {1[1]} {1[2]}")
274
+ 'a b c'
275
+ >>> m.expandf("{1[-1]} {1[-2]} {1[-3]}")
276
+ 'c b a'
277
+ >>>
278
+ >>> m = regex.match(r"(?P<letter>\w)+", "abc")
279
+ >>> m.expandf("{letter}")
280
+ 'c'
281
+ >>> m.expandf("{letter[0]} {letter[1]} {letter[2]}")
282
+ 'a b c'
283
+ >>> m.expandf("{letter[-1]} {letter[-2]} {letter[-3]}")
284
+ 'c b a'
285
+
286
+ Added support for referring to a group by number using ``(?P=...)``
287
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
288
+
289
+ This is in addition to the existing ``\g<...>``.
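
For instance (a minimal sketch):

.. sourcecode:: python

    >>> import regex
    >>> # (?P=1) is a backreference to group 1 written by number.
    >>> regex.match(r"(\w+) (?P=1)", "hello hello").group()
    'hello hello'
    >>> print(regex.match(r"(\w+) (?P=1)", "hello world"))
    None
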
290
+
291
+ Fixed the handling of locale-sensitive regexes
292
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
293
+
294
+ The ``LOCALE`` flag is intended for legacy code and has limited support. You're still recommended to use Unicode instead.
295
+
296
+ Added partial matches (`Hg issue 102 <https://github.com/mrabarnett/mrab-regex/issues/102>`_)
297
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
298
+
299
+ A partial match is one that matches up to the end of string, but that string has been truncated and you want to know whether a complete match could be possible if the string had not been truncated.
300
+
301
+ Partial matches are supported by ``match``, ``search``, ``fullmatch`` and ``finditer`` with the ``partial`` keyword argument.
302
+
303
+ Match objects have a ``partial`` attribute, which is ``True`` if it's a partial match.
304
+
305
+ For example, if you wanted a user to enter a 4-digit number and check it character by character as it was being entered:
306
+
307
+ .. sourcecode:: python
308
+
309
+ >>> pattern = regex.compile(r'\d{4}')
310
+
311
+ >>> # Initially, nothing has been entered:
312
+ >>> print(pattern.fullmatch('', partial=True))
313
+ <regex.Match object; span=(0, 0), match='', partial=True>
314
+
315
+ >>> # An empty string is OK, but it's only a partial match.
316
+ >>> # The user enters a letter:
317
+ >>> print(pattern.fullmatch('a', partial=True))
318
+ None
319
+ >>> # It'll never match.
320
+
321
+ >>> # The user deletes that and enters a digit:
322
+ >>> print(pattern.fullmatch('1', partial=True))
323
+ <regex.Match object; span=(0, 1), match='1', partial=True>
324
+ >>> # It matches this far, but it's only a partial match.
325
+
326
+ >>> # The user enters 2 more digits:
327
+ >>> print(pattern.fullmatch('123', partial=True))
328
+ <regex.Match object; span=(0, 3), match='123', partial=True>
329
+ >>> # It matches this far, but it's only a partial match.
330
+
331
+ >>> # The user enters another digit:
332
+ >>> print(pattern.fullmatch('1234', partial=True))
333
+ <regex.Match object; span=(0, 4), match='1234'>
334
+ >>> # It's a complete match.
335
+
336
+ >>> # If the user enters another digit:
337
+ >>> print(pattern.fullmatch('12345', partial=True))
338
+ None
339
+ >>> # It's no longer a match.
340
+
341
+ >>> # This is a partial match:
342
+ >>> pattern.match('123', partial=True).partial
343
+ True
344
+
345
+ >>> # This is a complete match:
346
+ >>> pattern.match('1233', partial=True).partial
347
+ False
348
+
349
+ ``*`` operator not working correctly with sub() (`Hg issue 106 <https://github.com/mrabarnett/mrab-regex/issues/106>`_)
350
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
351
+
352
+ Sometimes it's not clear how zero-width matches should be handled. For example, should ``.*`` match 0 characters directly after matching >0 characters?
353
+
354
+ .. sourcecode:: python
355
+
356
+ # Python 3.7 and later
357
+ >>> regex.sub('.*', 'x', 'test')
358
+ 'xx'
359
+ >>> regex.sub('.*?', '|', 'test')
360
+ '|||||||||'
361
+
362
+ # Python 3.6 and earlier
363
+ >>> regex.sub('(?V0).*', 'x', 'test')
364
+ 'x'
365
+ >>> regex.sub('(?V1).*', 'x', 'test')
366
+ 'xx'
367
+ >>> regex.sub('(?V0).*?', '|', 'test')
368
+ '|t|e|s|t|'
369
+ >>> regex.sub('(?V1).*?', '|', 'test')
370
+ '|||||||||'
371
+
372
+ Added ``capturesdict`` (`Hg issue 86 <https://github.com/mrabarnett/mrab-regex/issues/86>`_)
373
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
374
+
375
+ ``capturesdict`` is a combination of ``groupdict`` and ``captures``:
376
+
377
+ ``groupdict`` returns a dict of the named groups and the last capture of those groups.
378
+
379
+ ``captures`` returns a list of all the captures of a group.
380
+
381
+ ``capturesdict`` returns a dict of the named groups and lists of all the captures of those groups.
382
+
383
+ .. sourcecode:: python
384
+
385
+ >>> m = regex.match(r"(?:(?P<word>\w+) (?P<digits>\d+)\n)+", "one 1\ntwo 2\nthree 3\n")
386
+ >>> m.groupdict()
387
+ {'word': 'three', 'digits': '3'}
388
+ >>> m.captures("word")
389
+ ['one', 'two', 'three']
390
+ >>> m.captures("digits")
391
+ ['1', '2', '3']
392
+ >>> m.capturesdict()
393
+ {'word': ['one', 'two', 'three'], 'digits': ['1', '2', '3']}
394
+
395
+ Added ``allcaptures`` and ``allspans`` (`Git issue 474 <https://github.com/mrabarnett/mrab-regex/issues/474>`_)
396
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
397
+
398
+ ``allcaptures`` returns a list of all the captures of all the groups.
399
+
400
+ ``allspans`` returns a list of all the spans of all the captures of all the groups.
401
+
402
+ .. sourcecode:: python
403
+
404
+ >>> m = regex.match(r"(?:(?P<word>\w+) (?P<digits>\d+)\n)+", "one 1\ntwo 2\nthree 3\n")
405
+ >>> m.allcaptures()
406
+ (['one 1\ntwo 2\nthree 3\n'], ['one', 'two', 'three'], ['1', '2', '3'])
407
+ >>> m.allspans()
408
+ ([(0, 20)], [(0, 3), (6, 9), (12, 17)], [(4, 5), (10, 11), (18, 19)])
409
+
410
+ Allow duplicate names of groups (`Hg issue 87 <https://github.com/mrabarnett/mrab-regex/issues/87>`_)
411
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
412
+
413
+ Group names can be duplicated.
414
+
415
+ .. sourcecode:: python
416
+
417
+ >>> # With optional groups:
418
+ >>>
419
+ >>> # Both groups capture, the second capture 'overwriting' the first.
420
+ >>> m = regex.match(r"(?P<item>\w+)? or (?P<item>\w+)?", "first or second")
421
+ >>> m.group("item")
422
+ 'second'
423
+ >>> m.captures("item")
424
+ ['first', 'second']
425
+ >>> # Only the second group captures.
426
+ >>> m = regex.match(r"(?P<item>\w+)? or (?P<item>\w+)?", " or second")
427
+ >>> m.group("item")
428
+ 'second'
429
+ >>> m.captures("item")
430
+ ['second']
431
+ >>> # Only the first group captures.
432
+ >>> m = regex.match(r"(?P<item>\w+)? or (?P<item>\w+)?", "first or ")
433
+ >>> m.group("item")
434
+ 'first'
435
+ >>> m.captures("item")
436
+ ['first']
437
+ >>>
438
+ >>> # With mandatory groups:
439
+ >>>
440
+ >>> # Both groups capture, the second capture 'overwriting' the first.
441
+ >>> m = regex.match(r"(?P<item>\w*) or (?P<item>\w*)?", "first or second")
442
+ >>> m.group("item")
443
+ 'second'
444
+ >>> m.captures("item")
445
+ ['first', 'second']
446
+ >>> # Again, both groups capture, the second capture 'overwriting' the first.
447
+ >>> m = regex.match(r"(?P<item>\w*) or (?P<item>\w*)", " or second")
448
+ >>> m.group("item")
449
+ 'second'
450
+ >>> m.captures("item")
451
+ ['', 'second']
452
+ >>> # And yet again, both groups capture, the second capture 'overwriting' the first.
453
+ >>> m = regex.match(r"(?P<item>\w*) or (?P<item>\w*)", "first or ")
454
+ >>> m.group("item")
455
+ ''
456
+ >>> m.captures("item")
457
+ ['first', '']
458
+
459
+ Added ``fullmatch`` (`issue #16203 <https://bugs.python.org/issue16203>`_)
460
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
461
+
462
+ ``fullmatch`` behaves like ``match``, except that it must match all of the string.
463
+
464
+ .. sourcecode:: python
465
+
466
+ >>> print(regex.fullmatch(r"abc", "abc").span())
467
+ (0, 3)
468
+ >>> print(regex.fullmatch(r"abc", "abcx"))
469
+ None
470
+ >>> print(regex.fullmatch(r"abc", "abcx", endpos=3).span())
471
+ (0, 3)
472
+ >>> print(regex.fullmatch(r"abc", "xabcy", pos=1, endpos=4).span())
473
+ (1, 4)
474
+ >>>
475
+ >>> regex.match(r"a.*?", "abcd").group(0)
476
+ 'a'
477
+ >>> regex.fullmatch(r"a.*?", "abcd").group(0)
478
+ 'abcd'
479
+
480
+ Added ``subf`` and ``subfn``
481
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
482
+
483
+ ``subf`` and ``subfn`` are alternatives to ``sub`` and ``subn`` respectively. When passed a replacement string, they treat it as a format string.
484
+
485
+ .. sourcecode:: python
486
+
487
+ >>> regex.subf(r"(\w+) (\w+)", "{0} => {2} {1}", "foo bar")
488
+ 'foo bar => bar foo'
489
+ >>> regex.subf(r"(?P<word1>\w+) (?P<word2>\w+)", "{word2} {word1}", "foo bar")
490
+ 'bar foo'
491
+
492
+ Added ``expandf`` to match object
493
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
494
+
495
+ ``expandf`` is an alternative to ``expand``. When passed a replacement string, it treats it as a format string.
496
+
497
+ .. sourcecode:: python
498
+
499
+ >>> m = regex.match(r"(\w+) (\w+)", "foo bar")
500
+ >>> m.expandf("{0} => {2} {1}")
501
+ 'foo bar => bar foo'
502
+ >>>
503
+ >>> m = regex.match(r"(?P<word1>\w+) (?P<word2>\w+)", "foo bar")
504
+ >>> m.expandf("{word2} {word1}")
505
+ 'bar foo'
506
+
507
+ Detach searched string
508
+ ^^^^^^^^^^^^^^^^^^^^^^
509
+
510
+ A match object contains a reference to the string that was searched, via its ``string`` attribute. The ``detach_string`` method will 'detach' that string, making it available for garbage collection, which might save valuable memory if that string is very large.
511
+
512
+ .. sourcecode:: python
513
+
514
+ >>> m = regex.search(r"\w+", "Hello world")
515
+ >>> print(m.group())
516
+ Hello
517
+ >>> print(m.string)
518
+ Hello world
519
+ >>> m.detach_string()
520
+ >>> print(m.group())
521
+ Hello
522
+ >>> print(m.string)
523
+ None
524
+
525
+ Recursive patterns (`Hg issue 27 <https://github.com/mrabarnett/mrab-regex/issues/27>`_)
526
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
527
+
528
+ Recursive and repeated patterns are supported.
529
+
530
+ ``(?R)`` or ``(?0)`` tries to match the entire regex recursively. ``(?1)``, ``(?2)``, etc, try to match the relevant group.
531
+
532
+ ``(?&name)`` tries to match the named group.
533
+
534
+ .. sourcecode:: python
535
+
536
+ >>> regex.match(r"(Tarzan|Jane) loves (?1)", "Tarzan loves Jane").groups()
537
+ ('Tarzan',)
538
+ >>> regex.match(r"(Tarzan|Jane) loves (?1)", "Jane loves Tarzan").groups()
539
+ ('Jane',)
540
+
541
+ >>> m = regex.search(r"(\w)(?:(?R)|(\w?))\1", "kayak")
542
+ >>> m.group(0, 1, 2)
543
+ ('kayak', 'k', None)
544
+
545
+ The first two examples show how the subpattern within the group is reused, but is _not_ itself a group. In other words, ``"(Tarzan|Jane) loves (?1)"`` is equivalent to ``"(Tarzan|Jane) loves (?:Tarzan|Jane)"``.
546
+
547
+ It's possible to backtrack into a recursed or repeated group.
548
+
549
+ You can't call a group if there is more than one group with that group name or group number (``"ambiguous group reference"``).
550
+
551
+ The alternative forms ``(?P>name)`` and ``(?P&name)`` are also supported.
552
+
553
+ Full Unicode case-folding is supported
554
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
555
+
556
+ In version 1 behaviour, the regex module uses full case-folding when performing case-insensitive matches in Unicode.
557
+
558
+ .. sourcecode:: python
559
+
560
+ >>> regex.match(r"(?iV1)strasse", "stra\N{LATIN SMALL LETTER SHARP S}e").span()
561
+ (0, 6)
562
+ >>> regex.match(r"(?iV1)stra\N{LATIN SMALL LETTER SHARP S}e", "STRASSE").span()
563
+ (0, 7)
564
+
565
+ In version 0 behaviour, it uses simple case-folding for backward compatibility with the re module.
566
+
567
+ Approximate "fuzzy" matching (`Hg issue 12 <https://github.com/mrabarnett/mrab-regex/issues/12>`_, `Hg issue 41 <https://github.com/mrabarnett/mrab-regex/issues/41>`_, `Hg issue 109 <https://github.com/mrabarnett/mrab-regex/issues/109>`_)
568
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
569
+
570
+ Regex usually attempts an exact match, but sometimes an approximate, or "fuzzy", match is needed, for those cases where the text being searched may contain errors in the form of inserted, deleted or substituted characters.
571
+
572
+ A fuzzy regex specifies which types of errors are permitted, and, optionally, either the minimum and maximum or only the maximum permitted number of each type. (You cannot specify only a minimum.)
573
+
574
+ The 3 types of error are:
575
+
576
+ * Insertion, indicated by "i"
577
+
578
+ * Deletion, indicated by "d"
579
+
580
+ * Substitution, indicated by "s"
581
+
582
+ In addition, "e" indicates any type of error.
583
+
584
+ The fuzziness of a regex item is specified between "{" and "}" after the item.
585
+
586
+ Examples:
587
+
588
+ * ``foo`` match "foo" exactly
589
+
590
+ * ``(?:foo){i}`` match "foo", permitting insertions
591
+
592
+ * ``(?:foo){d}`` match "foo", permitting deletions
593
+
594
+ * ``(?:foo){s}`` match "foo", permitting substitutions
595
+
596
+ * ``(?:foo){i,s}`` match "foo", permitting insertions and substitutions
597
+
598
+ * ``(?:foo){e}`` match "foo", permitting errors
599
+
600
+ If a certain type of error is specified, then any type not specified will **not** be permitted.
601
+
602
+ In the following examples I'll omit the item and write only the fuzziness:
603
+
604
+ * ``{d<=3}`` permit at most 3 deletions, but no other types
605
+
606
+ * ``{i<=1,s<=2}`` permit at most 1 insertion and at most 2 substitutions, but no deletions
607
+
608
+ * ``{1<=e<=3}`` permit at least 1 and at most 3 errors
609
+
610
+ * ``{i<=2,d<=2,e<=3}`` permit at most 2 insertions, at most 2 deletions, at most 3 errors in total, but no substitutions
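
A short sketch of one of the constraints above in action (results are indicative):

.. sourcecode:: python

    >>> import regex
    >>> # "color" is one deletion away from "colour", so {e<=1} accepts it...
    >>> bool(regex.search(r"(?:colour){e<=1}", "color"))
    True
    >>> # ...but "colr" needs two errors, which exceeds the limit.
    >>> bool(regex.search(r"(?:colour){e<=1}", "colr"))
    False
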
611
+
612
+ It's also possible to state the costs of each type of error and the maximum permitted total cost.
613
+
614
+ Examples:
615
+
616
+ * ``{2i+2d+1s<=4}`` each insertion costs 2, each deletion costs 2, each substitution costs 1, the total cost must not exceed 4
617
+
618
+ * ``{i<=1,d<=1,s<=1,2i+2d+1s<=4}`` at most 1 insertion, at most 1 deletion, at most 1 substitution; each insertion costs 2, each deletion costs 2, each substitution costs 1, the total cost must not exceed 4
619
+
620
+ You can also use "<" instead of "<=" if you want an exclusive minimum or maximum.
621
+
622
+ You can add a test to perform on a character that's substituted or inserted.
623
+
624
+ Examples:
625
+
626
+ * ``{s<=2:[a-z]}`` at most 2 substitutions, which must be in the character set ``[a-z]``.
627
+
628
+ * ``{s<=2,i<=3:\d}`` at most 2 substitutions, at most 3 insertions, which must be digits.
629
+
630
+ By default, fuzzy matching searches for the first match that meets the given constraints. The ``ENHANCEMATCH`` flag will cause it to attempt to improve the fit (i.e. reduce the number of errors) of the match that it has found.
631
+
632
+ The ``BESTMATCH`` flag will make it search for the best match instead.
633
+
634
+ Further examples to note:
635
+
636
+ * ``regex.search("(dog){e}", "cat and dog")[1]`` returns ``"cat"`` because that matches ``"dog"`` with 3 errors (an unlimited number of errors is permitted).
637
+
638
+ * ``regex.search("(dog){e<=1}", "cat and dog")[1]`` returns ``" dog"`` (with a leading space) because that matches ``"dog"`` with 1 error, which is within the limit.
639
+
640
+ * ``regex.search("(?e)(dog){e<=1}", "cat and dog")[1]`` returns ``"dog"`` (without a leading space) because the fuzzy search matches ``" dog"`` with 1 error, which is within the limit, and the ``(?e)`` then it attempts a better fit.
641
+
642
+ In the first two examples there are perfect matches later in the string, but in neither case is it the first possible match.
643
+
644
+ The match object has an attribute ``fuzzy_counts`` which gives the total number of substitutions, insertions and deletions.
645
+
646
+ .. sourcecode:: python
647
+
648
+ >>> # A 'raw' fuzzy match:
649
+ >>> regex.fullmatch(r"(?:cats|cat){e<=1}", "cat").fuzzy_counts
650
+ (0, 0, 1)
651
+ >>> # 0 substitutions, 0 insertions, 1 deletion.
652
+
653
+ >>> # A better match might be possible if the ENHANCEMATCH flag used:
654
+ >>> regex.fullmatch(r"(?e)(?:cats|cat){e<=1}", "cat").fuzzy_counts
655
+ (0, 0, 0)
656
+ >>> # 0 substitutions, 0 insertions, 0 deletions.
657
+
658
+ The match object also has an attribute ``fuzzy_changes`` which gives a tuple of the positions of the substitutions, insertions and deletions.
659
+
660
+ .. sourcecode:: python
661
+
662
+ >>> m = regex.search('(fuu){i<=2,d<=2,e<=5}', 'anaconda foo bar')
663
+ >>> m
664
+ <regex.Match object; span=(7, 10), match='a f', fuzzy_counts=(0, 2, 2)>
665
+ >>> m.fuzzy_changes
666
+ ([], [7, 8], [10, 11])
667
+
668
+ What this means is that if the matched part of the string had been:
669
+
670
+ .. sourcecode:: python
671
+
672
+ 'anacondfuuoo bar'
673
+
674
+ it would've been an exact match.
675
+
676
+ However, there were insertions at positions 7 and 8:
677
+
678
+ .. sourcecode:: python
679
+
680
+ 'anaconda fuuoo bar'
681
+ ^^
682
+
683
+ and deletions at positions 10 and 11:
684
+
685
+ .. sourcecode:: python
686
+
687
+ 'anaconda f~~oo bar'
688
+ ^^
689
+
690
+ So the actual string was:
691
+
692
+ .. sourcecode:: python
693
+
694
+ 'anaconda foo bar'
695
+
696
+ Named lists ``\L<name>`` (`Hg issue 11 <https://github.com/mrabarnett/mrab-regex/issues/11>`_)
697
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
698
+
699
+ There are occasions where you may want to include a list (actually, a set) of options in a regex.
700
+
701
+ One way is to build the pattern like this:
702
+
703
+ .. sourcecode:: python
704
+
705
+ >>> p = regex.compile(r"first|second|third|fourth|fifth")
706
+
707
+ but if the list is large, parsing the resulting regex can take considerable time, and care must also be taken that the strings are properly escaped and properly ordered, for example, "cats" before "cat".
708
+
709
+ The new alternative is to use a named list:
710
+
711
+ .. sourcecode:: python
712
+
713
+ >>> option_set = ["first", "second", "third", "fourth", "fifth"]
714
+ >>> p = regex.compile(r"\L<options>", options=option_set)
715
+
716
+ The order of the items is irrelevant; they are treated as a set. The named lists are available as the ``.named_lists`` attribute of the pattern object:
717
+
718
+ .. sourcecode:: python
719
+
720
+ >>> print(p.named_lists)
721
+ {'options': frozenset({'third', 'first', 'fifth', 'fourth', 'second'})}
722
+
723
+ If there are any unused keyword arguments, ``ValueError`` will be raised unless you tell it otherwise:
724
+
725
+ .. sourcecode:: python
726
+
727
+ >>> option_set = ["first", "second", "third", "fourth", "fifth"]
728
+ >>> p = regex.compile(r"\L<options>", options=option_set, other_options=[])
729
+ Traceback (most recent call last):
730
+ File "<stdin>", line 1, in <module>
731
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 353, in compile
732
+ return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
733
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 500, in _compile
734
+ complain_unused_args()
735
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 483, in complain_unused_args
736
+ raise ValueError('unused keyword argument {!a}'.format(any_one))
737
+ ValueError: unused keyword argument 'other_options'
738
+ >>> p = regex.compile(r"\L<options>", options=option_set, other_options=[], ignore_unused=True)
739
+ >>> p = regex.compile(r"\L<options>", options=option_set, other_options=[], ignore_unused=False)
740
+ Traceback (most recent call last):
741
+ File "<stdin>", line 1, in <module>
742
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 353, in compile
743
+ return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
744
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 500, in _compile
745
+ complain_unused_args()
746
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 483, in complain_unused_args
747
+ raise ValueError('unused keyword argument {!a}'.format(any_one))
748
+ ValueError: unused keyword argument 'other_options'
749
+ >>>
750
+
751
+ Start and end of word
752
+ ^^^^^^^^^^^^^^^^^^^^^
753
+
754
+ ``\m`` matches at the start of a word.
755
+
756
+ ``\M`` matches at the end of a word.
757
+
758
+ Compare with ``\b``, which matches at the start or end of a word.
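
For example (a small sketch):

.. sourcecode:: python

    >>> import regex
    >>> # \m anchors each match to the start of a word.
    >>> regex.findall(r"\mw\w*", "we want windy weather")
    ['we', 'want', 'windy', 'weather']
    >>> # \M requires the end of a word, so only the standalone "wind" qualifies.
    >>> regex.search(r"\mwind\M", "windy wind").span()
    (6, 10)
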
759
+
760
+ Unicode line separators
761
+ ^^^^^^^^^^^^^^^^^^^^^^^
762
+
763
+ Normally the only line separator is ``\n`` (``\x0A``), but if the ``WORD`` flag is turned on then the line separators are ``\x0D\x0A``, ``\x0A``, ``\x0B``, ``\x0C`` and ``\x0D``, plus ``\x85``, ``\u2028`` and ``\u2029`` when working with Unicode.
764
+
765
+ This affects the regex dot ``"."``, which, with the ``DOTALL`` flag turned off, matches any character except a line separator. It also affects the line anchors ``^`` and ``$`` (in multiline mode).
766
+
767
+ Set operators
768
+ ^^^^^^^^^^^^^
769
+
770
+ **Version 1 behaviour only**
771
+
772
+ Set operators have been added, and a set ``[...]`` can include nested sets.
773
+
774
+ The operators, in order of increasing precedence, are:
775
+
776
+ * ``||`` for union ("x||y" means "x or y")
777
+
778
+ * ``~~`` (double tilde) for symmetric difference ("x~~y" means "x or y, but not both")
779
+
780
+ * ``&&`` for intersection ("x&&y" means "x and y")
781
+
782
+ * ``--`` (double dash) for difference ("x--y" means "x but not y")
783
+
784
+ Implicit union, i.e. simple juxtaposition as in ``[ab]``, has the highest precedence. Thus, ``[ab&&cd]`` is the same as ``[[a||b]&&[c||d]]``. A short interactive sketch follows the examples below.
785
+
786
+ Examples:
787
+
788
+ * ``[ab]`` # Set containing 'a' and 'b'
789
+
790
+ * ``[a-z]`` # Set containing 'a' .. 'z'
791
+
792
+ * ``[[a-z]--[qw]]`` # Set containing 'a' .. 'z', but not 'q' or 'w'
793
+
794
+ * ``[a-z--qw]`` # Same as above
795
+
796
+ * ``[\p{L}--QW]`` # Set containing all letters except 'Q' and 'W'
797
+
798
+ * ``[\p{N}--[0-9]]`` # Set containing all numbers except '0' .. '9'
799
+
800
+ * ``[\p{ASCII}&&\p{Letter}]`` # Set containing all characters that are both ASCII and letters
801
+
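+ As a small interactive sketch (set operators require version 1 behaviour, selected here with the inline ``(?V1)`` flag; the output shown is the expected result):
+
+ .. sourcecode:: python
+
+ >>> # consonant runs: the letters a-z with the vowels subtracted
+ >>> regex.findall(r"(?V1)[[a-z]--[aeiou]]+", "concatenate")
+ ['c', 'nc', 't', 'n', 't']
+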
802
+ regex.escape (`issue #2650 <https://bugs.python.org/issue2650>`_)
803
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
804
+
805
+ regex.escape has an additional keyword parameter ``special_only``. When True, only 'special' regex characters, such as '?', are escaped.
806
+
807
+ .. sourcecode:: python
808
+
809
+ >>> regex.escape("foo!?", special_only=False)
810
+ 'foo\\!\\?'
811
+ >>> regex.escape("foo!?", special_only=True)
812
+ 'foo!\\?'
813
+
814
+ regex.escape (`Hg issue 249 <https://github.com/mrabarnett/mrab-regex/issues/249>`_)
815
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
816
+
817
+ regex.escape has an additional keyword parameter ``literal_spaces``. When True, spaces are not escaped.
818
+
819
+ .. sourcecode:: python
820
+
821
+ >>> regex.escape("foo bar!?", literal_spaces=False)
822
+ 'foo\\ bar!\\?'
823
+ >>> regex.escape("foo bar!?", literal_spaces=True)
824
+ 'foo bar!\\?'
825
+
826
+ Repeated captures (`issue #7132 <https://bugs.python.org/issue7132>`_)
827
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
828
+
829
+ A match object has additional methods which return information on all the successful matches of a repeated group. These methods are:
830
+
831
+ * ``matchobject.captures([group1, ...])``
832
+
833
+ * Returns a list of the strings matched in a group or groups. Compare with ``matchobject.group([group1, ...])``.
834
+
835
+ * ``matchobject.starts([group])``
836
+
837
+ * Returns a list of the start positions. Compare with ``matchobject.start([group])``.
838
+
839
+ * ``matchobject.ends([group])``
840
+
841
+ * Returns a list of the end positions. Compare with ``matchobject.end([group])``.
842
+
843
+ * ``matchobject.spans([group])``
844
+
845
+ * Returns a list of the spans. Compare with ``matchobject.span([group])``.
846
+
847
+ .. sourcecode:: python
848
+
849
+ >>> m = regex.search(r"(\w{3})+", "123456789")
850
+ >>> m.group(1)
851
+ '789'
852
+ >>> m.captures(1)
853
+ ['123', '456', '789']
854
+ >>> m.start(1)
855
+ 6
856
+ >>> m.starts(1)
857
+ [0, 3, 6]
858
+ >>> m.end(1)
859
+ 9
860
+ >>> m.ends(1)
861
+ [3, 6, 9]
862
+ >>> m.span(1)
863
+ (6, 9)
864
+ >>> m.spans(1)
865
+ [(0, 3), (3, 6), (6, 9)]
866
+
867
+ Atomic grouping ``(?>...)`` (`issue #433030 <https://bugs.python.org/issue433030>`_)
868
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
869
+
870
+ If the following pattern subsequently fails, then the subpattern as a whole will fail; once an atomic group has matched, it is not re-entered to try shorter matches or other alternatives.
871
+
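+ A minimal sketch of the effect (expected results shown):
+
+ .. sourcecode:: python
+
+ >>> regex.search(r"a(?>bc|b)c", "abcc").group()
+ 'abcc'
+ >>> # 'bc' is consumed atomically, so the 'b' alternative is never retried
+ >>> print(regex.search(r"a(?>bc|b)c", "abc"))
+ None
+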
872
+ Possessive quantifiers
873
+ ^^^^^^^^^^^^^^^^^^^^^^
874
+
875
+ ``(?:...)?+`` ; ``(?:...)*+`` ; ``(?:...)++`` ; ``(?:...){min,max}+``
876
+
877
+ The subpattern is matched up to 'max' times. If the following pattern subsequently fails, then all the repeated subpatterns will fail as a whole. For example, ``(?:...)++`` is equivalent to ``(?>(?:...)+)``.
878
+
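+ A minimal sketch of the difference between a greedy and a possessive quantifier (expected results shown):
+
+ .. sourcecode:: python
+
+ >>> regex.match(r"\d+0", "12300").group()
+ '12300'
+ >>> # the possessive \d++ consumes every digit and gives nothing back
+ >>> print(regex.match(r"\d++0", "12300"))
+ None
+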
879
+ Scoped flags (`issue #433028 <https://bugs.python.org/issue433028>`_)
880
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
881
+
882
+ ``(?flags-flags:...)``
883
+
884
+ The flags will apply only to the subpattern. Flags can be turned on or off.
885
+
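+ For example, case-insensitivity can be limited to part of a pattern (a sketch; expected results shown):
+
+ .. sourcecode:: python
+
+ >>> bool(regex.match(r"(?i:abc)def", "ABCdef"))
+ True
+ >>> bool(regex.match(r"(?i:abc)def", "ABCDEF"))
+ False
+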
886
+ Definition of 'word' character (`issue #1693050 <https://bugs.python.org/issue1693050>`_)
887
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
888
+
889
+ The definition of a 'word' character has been expanded for Unicode. It conforms to the Unicode specification at ``http://www.unicode.org/reports/tr29/``.
890
+
891
+ Variable-length lookbehind
892
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^
893
+
894
+ A lookbehind can match a variable-length string.
895
+
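+ A short sketch (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> regex.findall(r"(?<=\d+\.)\d+", "3.14 and 2.71")
+ ['14', '71']
+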
896
+ Flags argument for regex.split, regex.sub and regex.subn (`issue #3482 <https://bugs.python.org/issue3482>`_)
897
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
898
+
899
+ ``regex.split``, ``regex.sub`` and ``regex.subn`` support a 'flags' argument.
900
+
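+ For example (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> regex.sub(r"cat", "dog", "Cat cat", flags=regex.IGNORECASE)
+ 'dog dog'
+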
901
+ Pos and endpos arguments for regex.sub and regex.subn
902
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
903
+
904
+ ``regex.sub`` and ``regex.subn`` support 'pos' and 'endpos' arguments.
905
+
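+ A sketch, assuming that text outside the ``pos``/``endpos`` region is left unchanged in the result (the output shown reflects that assumption):
+
+ .. sourcecode:: python
+
+ >>> # only the region from index 2 onwards is searched for matches
+ >>> regex.sub(r"\d", "#", "a1b2c3", pos=2)
+ 'a1b#c#'
+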
906
+ 'Overlapped' argument for regex.findall and regex.finditer
907
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
908
+
909
+ ``regex.findall`` and ``regex.finditer`` support an 'overlapped' flag which permits overlapped matches.
910
+
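+ For example (expected results shown):
+
+ .. sourcecode:: python
+
+ >>> regex.findall(r"..", "abcde")
+ ['ab', 'cd']
+ >>> regex.findall(r"..", "abcde", overlapped=True)
+ ['ab', 'bc', 'cd', 'de']
+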
911
+ Splititer
912
+ ^^^^^^^^^
913
+
914
+ ``regex.splititer`` has been added. It's a generator equivalent of ``regex.split``.
915
+
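+ For example (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> list(regex.splititer(r",", "a,b,c"))
+ ['a', 'b', 'c']
+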
916
+ Subscripting match objects for groups
917
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
918
+
919
+ A match object allows access to its groups via subscripting and slicing:
920
+
921
+ .. sourcecode:: python
922
+
923
+ >>> m = regex.search(r"(?P<before>.*?)(?P<num>\d+)(?P<after>.*)", "pqr123stu")
924
+ >>> print(m["before"])
925
+ pqr
926
+ >>> print(len(m))
927
+ 4
928
+ >>> print(m[:])
929
+ ('pqr123stu', 'pqr', '123', 'stu')
930
+
931
+ Named groups
932
+ ^^^^^^^^^^^^
933
+
934
+ Groups can be named with ``(?<name>...)`` as well as the existing ``(?P<name>...)``.
935
+
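+ A short sketch (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> m = regex.match(r"(?<first>\w+) (?<second>\w+)", "Hello world")
+ >>> m.group("second")
+ 'world'
+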
936
+ Group references
937
+ ^^^^^^^^^^^^^^^^
938
+
939
+ Groups can be referenced within a pattern with ``\g<name>``. This also allows there to be more than 99 groups.
940
+
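+ A short sketch using ``\g<name>`` as a backreference (expected results shown):
+
+ .. sourcecode:: python
+
+ >>> bool(regex.match(r"(?<ch>.)\g<ch>", "aa"))
+ True
+ >>> bool(regex.match(r"(?<ch>.)\g<ch>", "ab"))
+ False
+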
941
+ Named characters ``\N{name}``
942
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
943
+
944
+ Named characters are supported. Note that only those known by Python's Unicode database will be recognised.
945
+
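+ For example (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> regex.findall(r"\N{LATIN SMALL LETTER E WITH ACUTE}", "caf\u00e9")
+ ['é']
+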
946
+ Unicode codepoint properties, including scripts and blocks
947
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
948
+
949
+ ``\p{property=value}``; ``\P{property=value}``; ``\p{value}`` ; ``\P{value}``
950
+
951
+ Many Unicode properties are supported, including blocks and scripts. ``\p{property=value}`` or ``\p{property:value}`` matches a character whose property ``property`` has value ``value``. The inverse of ``\p{property=value}`` is ``\P{property=value}`` or ``\p{^property=value}``. A short example follows the lists below.
952
+
953
+ If the short form ``\p{value}`` is used, the properties are checked in the order: ``General_Category``, ``Script``, ``Block``, binary property:
954
+
955
+ * ``Latin``, the 'Latin' script (``Script=Latin``).
956
+
957
+ * ``BasicLatin``, the 'BasicLatin' block (``Block=BasicLatin``).
958
+
959
+ * ``Alphabetic``, the 'Alphabetic' binary property (``Alphabetic=Yes``).
960
+
961
+ A short form starting with ``Is`` indicates a script or binary property:
962
+
963
+ * ``IsLatin``, the 'Latin' script (``Script=Latin``).
964
+
965
+ * ``IsAlphabetic``, the 'Alphabetic' binary property (``Alphabetic=Yes``).
966
+
967
+ A short form starting with ``In`` indicates a block property:
968
+
969
+ * ``InBasicLatin``, the 'BasicLatin' block (``Block=BasicLatin``).
970
+
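+ A short sketch using the ``Greek`` script property (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> regex.findall(r"\p{Greek}+", "alpha α beta β")
+ ['α', 'β']
+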
971
+ POSIX character classes
972
+ ^^^^^^^^^^^^^^^^^^^^^^^
973
+
974
+ ``[[:alpha:]]``; ``[[:^alpha:]]``
975
+
976
+ POSIX character classes are supported. These are normally treated as an alternative form of ``\p{...}``.
977
+
978
+ The exceptions are ``alnum``, ``digit``, ``punct`` and ``xdigit``, whose definitions are different from those of Unicode.
979
+
980
+ ``[[:alnum:]]`` is equivalent to ``\p{posix_alnum}``.
981
+
982
+ ``[[:digit:]]`` is equivalent to ``\p{posix_digit}``.
983
+
984
+ ``[[:punct:]]`` is equivalent to ``\p{posix_punct}``.
985
+
986
+ ``[[:xdigit:]]`` is equivalent to ``\p{posix_xdigit}``.
987
+
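+ For example (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> regex.findall(r"[[:alpha:]]+", "abc 123 def")
+ ['abc', 'def']
+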
988
+ Search anchor ``\G``
989
+ ^^^^^^^^^^^^^^^^^^^^
990
+
991
+ A search anchor has been added. It matches at the position where each search started/continued and can be used for contiguous matches or in negative variable-length lookbehinds to limit how far back the lookbehind goes:
992
+
993
+ .. sourcecode:: python
994
+
995
+ >>> regex.findall(r"\w{2}", "abcd ef")
996
+ ['ab', 'cd', 'ef']
997
+ >>> regex.findall(r"\G\w{2}", "abcd ef")
998
+ ['ab', 'cd']
999
+
1000
+ * The search starts at position 0 and matches 'ab'.
1001
+
1002
+ * The search continues at position 2 and matches 'cd'.
1003
+
1004
+ * The search continues at position 4 and fails to match any letters.
1005
+
1006
+ * The anchor stops the search start position from being advanced, so there are no more results.
1007
+
1008
+ Reverse searching
1009
+ ^^^^^^^^^^^^^^^^^
1010
+
1011
+ Searches can also work backwards:
1012
+
1013
+ .. sourcecode:: python
1014
+
1015
+ >>> regex.findall(r".", "abc")
1016
+ ['a', 'b', 'c']
1017
+ >>> regex.findall(r"(?r).", "abc")
1018
+ ['c', 'b', 'a']
1019
+
1020
+ Note that the result of a reverse search is not necessarily the reverse of a forward search:
1021
+
1022
+ .. sourcecode:: python
1023
+
1024
+ >>> regex.findall(r"..", "abcde")
1025
+ ['ab', 'cd']
1026
+ >>> regex.findall(r"(?r)..", "abcde")
1027
+ ['de', 'bc']
1028
+
1029
+ Matching a single grapheme ``\X``
1030
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1031
+
1032
+ The grapheme matcher is supported. It conforms to the Unicode specification at ``http://www.unicode.org/reports/tr29/``.
1033
+
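+ A short sketch, using a length check to show that a base character and a combining accent are matched as a single grapheme (expected result shown):
+
+ .. sourcecode:: python
+
+ >>> [len(g) for g in regex.findall(r"\X", "e\u0301a")]
+ [2, 1]
+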
1034
+ Branch reset ``(?|...|...)``
1035
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1036
+
1037
+ Group numbers will be reused across the alternatives, but groups with different names will have different group numbers.
1038
+
1039
+ .. sourcecode:: python
1040
+
1041
+ >>> regex.match(r"(?|(first)|(second))", "first").groups()
1042
+ ('first',)
1043
+ >>> regex.match(r"(?|(first)|(second))", "second").groups()
1044
+ ('second',)
1045
+
1046
+ Note that there is only one group.
1047
+
1048
+ Default Unicode word boundary
1049
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1050
+
1051
+ The ``WORD`` flag changes the definition of a 'word boundary' to that of a default Unicode word boundary. This applies to ``\b`` and ``\B``.
1052
+
1053
+ Timeout
1054
+ ^^^^^^^
1055
+
1056
+ The matching methods and functions support timeouts. The timeout (in seconds) applies to the entire operation:
1057
+
1058
+ .. sourcecode:: python
1059
+
1060
+ >>> from time import sleep
1061
+ >>>
1062
+ >>> def fast_replace(m):
1063
+ ... return 'X'
1064
+ ...
1065
+ >>> def slow_replace(m):
1066
+ ... sleep(0.5)
1067
+ ... return 'X'
1068
+ ...
1069
+ >>> regex.sub(r'[a-z]', fast_replace, 'abcde', timeout=2)
1070
+ 'XXXXX'
1071
+ >>> regex.sub(r'[a-z]', slow_replace, 'abcde', timeout=2)
1072
+ Traceback (most recent call last):
1073
+ File "<stdin>", line 1, in <module>
1074
+ File "C:\Python310\lib\site-packages\regex\regex.py", line 278, in sub
1075
+ return pat.sub(repl, string, count, pos, endpos, concurrent, timeout)
1076
+ TimeoutError: regex timed out
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ regex-2023.12.25.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ regex-2023.12.25.dist-info/LICENSE.txt,sha256=v_Ve9M3MjBTOJZ-OirYOJkQYRA1jNfTcE4Jz-9UGFE0,11584
3
+ regex-2023.12.25.dist-info/METADATA,sha256=ucEajo_mHCtyCT0CTybuoZ4MN9u6UakUKQkRLpOXge8,40897
4
+ regex-2023.12.25.dist-info/RECORD,,
5
+ regex-2023.12.25.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
6
+ regex-2023.12.25.dist-info/top_level.txt,sha256=aQmiDMhNTF26cCK4_7D-qaVvhbxClG0wyCTnEhkzYBs,6
7
+ regex/__init__.py,sha256=9slNQEb4SCZ9LncNzcQvqmkyxXlcOAF7QwAwigxWjsw,65
8
+ regex/__pycache__/__init__.cpython-310.pyc,,
9
+ regex/__pycache__/_regex_core.cpython-310.pyc,,
10
+ regex/__pycache__/regex.cpython-310.pyc,,
11
+ regex/__pycache__/test_regex.cpython-310.pyc,,
12
+ regex/_regex.cpython-310-x86_64-linux-gnu.so,sha256=eDasy28Zqt08KlBmrPsvhvzf9RC7bT77ODLqPybkzBM,2503320
13
+ regex/_regex_core.py,sha256=6O3OA675IbUDCA9VsWtGr6Ffwuo-Ft619S-JXVGmmLA,140918
14
+ regex/regex.py,sha256=A0auMoHEIK9g-eMDI-tXW52a3imXnalgBUsjq9Yd24s,32683
15
+ regex/test_regex.py,sha256=WBKueH55Ar8-2RRS7c_rJHyLvO9XARZNrJAoiKFCDjw,219666
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
env-llmeval/lib/python3.10/site-packages/regex-2023.12.25.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ regex
env-llmeval/lib/python3.10/site-packages/sacrebleu/__init__.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+
4
+ # Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License"). You may not
7
+ # use this file except in compliance with the License. A copy of the License
8
+ # is located at
9
+ #
10
+ # http://aws.amazon.com/apache2.0/
11
+ #
12
+ # or in the "license" file accompanying this file. This file is distributed on
13
+ # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
14
+ # express or implied. See the License for the specific language governing
15
+ # permissions and limitations under the License.
16
+
17
+ __version__ = '2.4.2'
18
+ __description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores'
19
+
20
+
21
+ from .utils import smart_open, SACREBLEU_DIR, download_test_set
22
+ from .utils import get_source_file, get_reference_files
23
+ from .utils import get_available_testsets, get_langpairs_for_testset
24
+ from .metrics.helpers import extract_word_ngrams, extract_char_ngrams
25
+ from .dataset import DATASETS
26
+ from .metrics import BLEU, CHRF, TER
27
+
28
+ # Backward compatibility functions for old style API access (<= 1.4.10)
29
+ from .compat import corpus_bleu, raw_corpus_bleu, sentence_bleu
30
+ from .compat import corpus_chrf, sentence_chrf
31
+ from .compat import corpus_ter, sentence_ter
32
+
33
+ __all__ = [
34
+ 'smart_open', 'SACREBLEU_DIR', 'download_test_set',
35
+ 'get_source_file', 'get_reference_files',
36
+ 'get_available_testsets', 'get_langpairs_for_testset',
37
+ 'extract_word_ngrams', 'extract_char_ngrams',
38
+ 'DATASETS',
39
+ 'BLEU', 'CHRF', 'TER',
40
+ 'corpus_bleu', 'raw_corpus_bleu', 'sentence_bleu',
41
+ 'corpus_chrf', 'sentence_chrf',
42
+ 'corpus_ter', 'sentence_ter'
43
+ ]
env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The implementation of various metrics."""
2
+
3
+ from .bleu import BLEU, BLEUScore # noqa: F401
4
+ from .chrf import CHRF, CHRFScore # noqa: F401
5
+ from .ter import TER, TERScore # noqa: F401
6
+
7
+ METRICS = {
8
+ 'BLEU': BLEU,
9
+ 'CHRF': CHRF,
10
+ 'TER': TER,
11
+ }