applied-ai-018 committed on
Commit
1021645
·
verified ·
1 Parent(s): bf688b6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/pip/_internal/__init__.py +19 -0
  2. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/build_env.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/cache.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/configuration.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/exceptions.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/main.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/pyproject.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pip/_internal/cache.py +264 -0
  11. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/check.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/debug.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/index.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/install.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/search.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/configuration.py +266 -0
  19. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/download.py +140 -0
  20. llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/search.py +174 -0
  21. llmeval-env/lib/python3.10/site-packages/pip/_internal/configuration.py +366 -0
  22. llmeval-env/lib/python3.10/site-packages/pip/_internal/index/__init__.py +2 -0
  23. llmeval-env/lib/python3.10/site-packages/pip/_internal/index/collector.py +648 -0
  24. llmeval-env/lib/python3.10/site-packages/pip/_internal/index/package_finder.py +1004 -0
  25. llmeval-env/lib/python3.10/site-packages/pip/_internal/index/sources.py +224 -0
  26. llmeval-env/lib/python3.10/site-packages/pip/_internal/main.py +12 -0
  27. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__init__.py +2 -0
  28. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/auth.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/cache.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/download.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/session.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/utils.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/auth.py +323 -0
  37. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/cache.py +69 -0
  38. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/download.py +185 -0
  39. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py +210 -0
  40. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/session.py +454 -0
  41. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/utils.py +96 -0
  42. llmeval-env/lib/python3.10/site-packages/pip/_internal/network/xmlrpc.py +60 -0
  43. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__init__.py +0 -0
  44. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/check.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__init__.py +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pip/_internal/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import pip._internal.utils.inject_securetransport # noqa
4
+ from pip._internal.utils import _log
5
+
6
+ # init_logging() must be called before any call to logging.getLogger()
7
+ # which happens at import of most modules.
8
+ _log.init_logging()
9
+
10
+
11
def main(args: Optional[List[str]] = None) -> int:
    """This is preserved for old console scripts that may still be referencing
    it.

    For additional details, see https://github.com/pypa/pip/issues/7498.

    :param args: Command-line arguments to forward to pip's real entry
        point, or ``None`` to let it read ``sys.argv``.
    :return: The process exit status produced by the wrapped entry point.
    """
    # Imported lazily so that merely importing ``pip._internal`` stays cheap
    # and side-effect free beyond the logging setup above.
    from pip._internal.utils.entrypoints import _wrapper

    return _wrapper(args)
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/build_env.cpython-310.pyc ADDED
Binary file (9.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/cache.cpython-310.pyc ADDED
Binary file (8.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/configuration.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (23.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/main.cpython-310.pyc ADDED
Binary file (618 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/pyproject.cpython-310.pyc ADDED
Binary file (3.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-310.pyc ADDED
Binary file (4.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-310.pyc ADDED
Binary file (9.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/cache.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Cache Management
2
+ """
3
+
4
+ import hashlib
5
+ import json
6
+ import logging
7
+ import os
8
+ from typing import Any, Dict, List, Optional, Set
9
+
10
+ from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
11
+ from pip._vendor.packaging.utils import canonicalize_name
12
+
13
+ from pip._internal.exceptions import InvalidWheelFilename
14
+ from pip._internal.models.format_control import FormatControl
15
+ from pip._internal.models.link import Link
16
+ from pip._internal.models.wheel import Wheel
17
+ from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
18
+ from pip._internal.utils.urls import path_to_url
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
def _hash_dict(d: Dict[str, str]) -> str:
    """Return a stable sha224 hex digest of a dictionary.

    The dict is serialized to a canonical JSON form (sorted keys, compact
    separators, ASCII-only) so equal dicts always hash identically.
    """
    canonical_form = json.dumps(
        d, sort_keys=True, separators=(",", ":"), ensure_ascii=True
    )
    digest = hashlib.sha224(canonical_form.encode("ascii"))
    return digest.hexdigest()
27
+
28
+
29
class Cache:
    """An abstract class - provides cache directories for data from links

    :param cache_dir: The root of the cache.
    :param format_control: An object of FormatControl class to limit
        binaries being read from the cache.
    :param allowed_formats: which formats of files the cache should store.
        ('binary' and 'source' are the only allowed values)
    """

    def __init__(
        self, cache_dir: str, format_control: FormatControl, allowed_formats: Set[str]
    ) -> None:
        super().__init__()
        # A cache rooted at a relative path would move with the CWD.
        assert not cache_dir or os.path.isabs(cache_dir)
        self.cache_dir = cache_dir or None
        self.format_control = format_control
        self.allowed_formats = allowed_formats

        _valid_formats = {"source", "binary"}
        assert self.allowed_formats.issubset(_valid_formats)

    def _get_cache_path_parts(self, link: Link) -> List[str]:
        """Get parts of part that must be os.path.joined with cache_dir"""
        # The cache key starts from the URL stripped of its fragment — any
        # other fragment items are irrelevant to the link's identity.
        key_parts = {"url": link.url_without_fragment}
        if link.hash_name is not None and link.hash is not None:
            key_parts[link.hash_name] = link.hash
        if link.subdirectory_fragment:
            key_parts["subdirectory"] = link.subdirectory_fragment

        # The interpreter name/version is folded into the key to cope with
        # ill-behaved sdists that build a different wheel depending on the
        # Python their setup.py runs under without encoding that difference
        # in compatibility tags. See https://github.com/pypa/pip/issues/7296
        key_parts["interpreter_name"] = interpreter_name()
        key_parts["interpreter_version"] = interpreter_version()

        # sha224 has security properties similar to sha256 but a shorter
        # output; for a cache key the difference is immaterial.
        digest = _hash_dict(key_parts)

        # Nest the directories a few levels deep so we don't exhaust the
        # number of subdirectories a single directory can hold on some
        # filesystems.
        return [digest[:2], digest[2:4], digest[4:6], digest[6:]]

    def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]:
        # Nothing can be cached without a cache root, a package name and a
        # link to key on.
        if not self.cache_dir or not canonical_package_name or not link:
            return []

        formats = self.format_control.get_allowed_formats(canonical_package_name)
        if self.allowed_formats.isdisjoint(formats):
            return []

        directory = self.get_path_for_link(link)
        if not os.path.isdir(directory):
            return []
        return [(entry, directory) for entry in os.listdir(directory)]

    def get_path_for_link(self, link: Link) -> str:
        """Return a directory to store cached items in for link."""
        raise NotImplementedError()

    def get(
        self,
        link: Link,
        package_name: Optional[str],
        supported_tags: List[Tag],
    ) -> Link:
        """Returns a link to a cached item if it exists, otherwise returns the
        passed link.
        """
        raise NotImplementedError()
115
+
116
+
117
class SimpleWheelCache(Cache):
    """A cache of wheels for future installs."""

    def __init__(self, cache_dir: str, format_control: FormatControl) -> None:
        # A wheel cache only ever stores binaries.
        super().__init__(cache_dir, format_control, {"binary"})

    def get_path_for_link(self, link: Link) -> str:
        """Return a directory to store cached wheels for link

        Because there are M wheels for any one sdist, we provide a directory
        to cache them in, and then consult that directory when looking up
        cache hits.

        We only insert things into the cache if they have plausible version
        numbers, so that we don't contaminate the cache with things that were
        not unique. E.g. ./package might have dozens of installs done for it
        and build a version of 0.0...and if we built and cached a wheel, we'd
        end up using the same wheel even if the source has been edited.

        :param link: The link of the sdist for which this will cache wheels.
        """
        assert self.cache_dir
        # Wheels live under <cache_dir>/wheels/<nested hash parts>.
        return os.path.join(
            self.cache_dir, "wheels", *self._get_cache_path_parts(link)
        )

    def get(
        self,
        link: Link,
        package_name: Optional[str],
        supported_tags: List[Tag],
    ) -> Link:
        """Return a link to the most compatible cached wheel for *link*,
        or *link* itself when there is no usable cache hit.
        """
        if not package_name:
            return link

        expected_name = canonicalize_name(package_name)
        scored = []
        for filename, directory in self._get_candidates(link, expected_name):
            try:
                parsed = Wheel(filename)
            except InvalidWheelFilename:
                # Not a wheel at all; ignore it.
                continue
            if canonicalize_name(parsed.name) != expected_name:
                logger.debug(
                    "Ignoring cached wheel %s for %s as it "
                    "does not match the expected distribution name %s.",
                    filename,
                    link,
                    package_name,
                )
                continue
            if not parsed.supported(supported_tags):
                # Built for a different python/arch/etc
                continue
            scored.append(
                (
                    parsed.support_index_min(supported_tags),
                    filename,
                    directory,
                )
            )

        if not scored:
            return link

        # The smallest support index is the most preferred tag match.
        _, best_name, best_dir = min(scored)
        return Link(path_to_url(os.path.join(best_dir, best_name)))
185
+
186
+
187
class EphemWheelCache(SimpleWheelCache):
    """A SimpleWheelCache rooted in a temporary directory it creates itself.

    The directory is globally managed, so it is cleaned up automatically at
    process exit rather than by this object.
    """

    def __init__(self, format_control: FormatControl) -> None:
        self._temp_dir = TempDirectory(
            kind=tempdir_kinds.EPHEM_WHEEL_CACHE,
            globally_managed=True,
        )
        super().__init__(self._temp_dir.path, format_control)
197
+
198
+
199
class CacheEntry:
    """A cache hit: the resolved link plus which cache tier produced it.

    ``persistent`` is True for hits from the on-disk wheel cache and False
    for hits from the ephemeral (temp-dir) cache.
    """

    def __init__(
        self,
        link: Link,
        persistent: bool,
    ):
        self.persistent = persistent
        self.link = link
207
+
208
+
209
class WheelCache(Cache):
    """Wraps EphemWheelCache and SimpleWheelCache into a single Cache

    This Cache allows for gracefully degradation, using the ephem wheel cache
    when a certain link is not found in the simple wheel cache first.
    """

    def __init__(self, cache_dir: str, format_control: FormatControl) -> None:
        super().__init__(cache_dir, format_control, {"binary"})
        self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
        self._ephem_cache = EphemWheelCache(format_control)

    def get_path_for_link(self, link: Link) -> str:
        # Delegate to the persistent cache for storage paths.
        return self._wheel_cache.get_path_for_link(link)

    def get_ephem_path_for_link(self, link: Link) -> str:
        # Delegate to the ephemeral cache for temp storage paths.
        return self._ephem_cache.get_path_for_link(link)

    def get(
        self,
        link: Link,
        package_name: Optional[str],
        supported_tags: List[Tag],
    ) -> Link:
        entry = self.get_cache_entry(link, package_name, supported_tags)
        return link if entry is None else entry.link

    def get_cache_entry(
        self,
        link: Link,
        package_name: Optional[str],
        supported_tags: List[Tag],
    ) -> Optional[CacheEntry]:
        """Returns a CacheEntry with a link to a cached item if it exists or
        None. The cache entry indicates if the item was found in the persistent
        or ephemeral cache.
        """
        # Probe the persistent cache first, then fall back to the ephemeral
        # one; a sub-cache signals a miss by handing back the same link
        # object, hence the identity (`is not`) comparison.
        for sub_cache, is_persistent in (
            (self._wheel_cache, True),
            (self._ephem_cache, False),
        ):
            resolved = sub_cache.get(
                link=link,
                package_name=package_name,
                supported_tags=supported_tags,
            )
            if resolved is not link:
                return CacheEntry(resolved, persistent=is_persistent)

        return None
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/check.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/debug.cpython-310.pyc ADDED
Binary file (6.67 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/index.cpython-310.pyc ADDED
Binary file (4.63 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/install.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/search.cpython-310.pyc ADDED
Binary file (5.36 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-310.pyc ADDED
Binary file (3.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/configuration.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import subprocess
4
+ from optparse import Values
5
+ from typing import Any, List, Optional
6
+
7
+ from pip._internal.cli.base_command import Command
8
+ from pip._internal.cli.status_codes import ERROR, SUCCESS
9
+ from pip._internal.configuration import (
10
+ Configuration,
11
+ Kind,
12
+ get_configuration_files,
13
+ kinds,
14
+ )
15
+ from pip._internal.exceptions import PipError
16
+ from pip._internal.utils.logging import indent_log
17
+ from pip._internal.utils.misc import get_prog, write_output
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
class ConfigurationCommand(Command):
    """
    Manage local and global configuration.

    Subcommands:

    - list: List the active configuration (or from the file specified)
    - edit: Edit the configuration file in an editor
    - get: Get the value associated with name
    - set: Set the name=value
    - unset: Unset the value associated with name
    - debug: List the configuration files and values defined under them

    If none of --user, --global and --site are passed, a virtual
    environment configuration file is used if one is active and the file
    exists. Otherwise, all modifications happen to the user file by
    default.
    """

    # `pip config` is usable outside a virtualenv even when pip itself
    # requires one.
    ignore_require_venv = True
    usage = """
        %prog [<file-option>] list
        %prog [<file-option>] [--editor <editor-path>] edit

        %prog [<file-option>] get name
        %prog [<file-option>] set name value
        %prog [<file-option>] unset name
        %prog [<file-option>] debug
    """

    def add_options(self) -> None:
        # Declare the command's options: an editor override and three
        # mutually exclusive file selectors (--global/--user/--site).
        self.cmd_opts.add_option(
            "--editor",
            dest="editor",
            action="store",
            default=None,
            help=(
                "Editor to use to edit the file. Uses VISUAL or EDITOR "
                "environment variables if not provided."
            ),
        )

        self.cmd_opts.add_option(
            "--global",
            dest="global_file",
            action="store_true",
            default=False,
            help="Use the system-wide configuration file only",
        )

        self.cmd_opts.add_option(
            "--user",
            dest="user_file",
            action="store_true",
            default=False,
            help="Use the user configuration file only",
        )

        self.cmd_opts.add_option(
            "--site",
            dest="site_file",
            action="store_true",
            default=False,
            help="Use the current environment configuration file only",
        )

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        # Dispatch table from subcommand name to its handler method.
        handlers = {
            "list": self.list_values,
            "edit": self.open_in_editor,
            "get": self.get_name,
            "set": self.set_name_value,
            "unset": self.unset_name,
            "debug": self.list_config_values,
        }

        # Determine action
        if not args or args[0] not in handlers:
            logger.error(
                "Need an action (%s) to perform.",
                ", ".join(sorted(handlers)),
            )
            return ERROR

        action = args[0]

        # Determine which configuration files are to be loaded
        #    Depends on whether the command is modifying.
        try:
            load_only = self._determine_file(
                options, need_value=(action in ["get", "set", "unset", "edit"])
            )
        except PipError as e:
            logger.error(e.args[0])
            return ERROR

        # Load a new configuration
        self.configuration = Configuration(
            isolated=options.isolated_mode, load_only=load_only
        )
        self.configuration.load()

        # Error handling happens here, not in the action-handlers.
        try:
            handlers[action](options, args[1:])
        except PipError as e:
            logger.error(e.args[0])
            return ERROR

        return SUCCESS

    def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]:
        # Collect which of --user/--global/--site flags were passed.
        file_options = [
            key
            for key, value in (
                (kinds.USER, options.user_file),
                (kinds.GLOBAL, options.global_file),
                (kinds.SITE, options.site_file),
            )
            if value
        ]

        if not file_options:
            # Read-only commands (list/debug) can merge all files.
            if not need_value:
                return None
            # Default to user, unless there's a site file.
            elif any(
                os.path.exists(site_config_file)
                for site_config_file in get_configuration_files()[kinds.SITE]
            ):
                return kinds.SITE
            else:
                return kinds.USER
        elif len(file_options) == 1:
            return file_options[0]

        # More than one selector flag is ambiguous for a modifying command.
        raise PipError(
            "Need exactly one file to operate upon "
            "(--user, --site, --global) to perform."
        )

    def list_values(self, options: Values, args: List[str]) -> None:
        """Print every effective key=value pair, sorted by key."""
        self._get_n_args(args, "list", n=0)

        for key, value in sorted(self.configuration.items()):
            write_output("%s=%r", key, value)

    def get_name(self, options: Values, args: List[str]) -> None:
        """Print the value stored for a single configuration key."""
        key = self._get_n_args(args, "get [name]", n=1)
        value = self.configuration.get_value(key)

        write_output("%s", value)

    def set_name_value(self, options: Values, args: List[str]) -> None:
        """Store a key=value pair, then persist the configuration."""
        key, value = self._get_n_args(args, "set [name] [value]", n=2)
        self.configuration.set_value(key, value)

        self._save_configuration()

    def unset_name(self, options: Values, args: List[str]) -> None:
        """Remove a key, then persist the configuration."""
        key = self._get_n_args(args, "unset [name]", n=1)
        self.configuration.unset_value(key)

        self._save_configuration()

    def list_config_values(self, options: Values, args: List[str]) -> None:
        """List config key-value pairs across different config files"""
        self._get_n_args(args, "debug", n=0)

        self.print_env_var_values()
        # Iterate over config files and print if they exist, and the
        # key-value pairs present in them if they do
        for variant, files in sorted(self.configuration.iter_config_files()):
            write_output("%s:", variant)
            for fname in files:
                with indent_log():
                    file_exists = os.path.exists(fname)
                    write_output("%s, exists: %r", fname, file_exists)
                    if file_exists:
                        self.print_config_file_values(variant)

    def print_config_file_values(self, variant: Kind) -> None:
        """Get key-value pairs from the file of a variant"""
        for name, value in self.configuration.get_values_in_config(variant).items():
            with indent_log():
                write_output("%s: %s", name, value)

    def print_env_var_values(self) -> None:
        """Get key-values pairs present as environment variables"""
        write_output("%s:", "env_var")
        with indent_log():
            for key, value in sorted(self.configuration.get_environ_vars()):
                # Reconstruct the PIP_* variable name from the config key.
                env_var = f"PIP_{key.upper()}"
                write_output("%s=%r", env_var, value)

    def open_in_editor(self, options: Values, args: List[str]) -> None:
        """Launch an external editor on the selected configuration file."""
        editor = self._determine_editor(options)

        fname = self.configuration.get_file_to_edit()
        if fname is None:
            raise PipError("Could not determine appropriate file.")

        try:
            subprocess.check_call([editor, fname])
        except subprocess.CalledProcessError as e:
            raise PipError(
                "Editor Subprocess exited with exit code {}".format(e.returncode)
            )

    def _get_n_args(self, args: List[str], example: str, n: int) -> Any:
        """Helper to make sure the command got the right number of arguments"""
        if len(args) != n:
            msg = (
                "Got unexpected number of arguments, expected {}. "
                '(example: "{} config {}")'
            ).format(n, get_prog(), example)
            raise PipError(msg)

        # Return a scalar for exactly one argument, the list otherwise.
        if n == 1:
            return args[0]
        else:
            return args

    def _save_configuration(self) -> None:
        # We successfully ran a modifying command. Need to save the
        # configuration.
        try:
            self.configuration.save()
        except Exception:
            logger.exception(
                "Unable to save configuration. Please report this as a bug."
            )
            raise PipError("Internal Error.")

    def _determine_editor(self, options: Values) -> str:
        # Precedence: explicit --editor flag, then VISUAL, then EDITOR.
        if options.editor is not None:
            return options.editor
        elif "VISUAL" in os.environ:
            return os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            return os.environ["EDITOR"]
        else:
            raise PipError("Could not determine editor to use.")
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/download.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from optparse import Values
4
+ from typing import List
5
+
6
+ from pip._internal.cli import cmdoptions
7
+ from pip._internal.cli.cmdoptions import make_target_python
8
+ from pip._internal.cli.req_command import RequirementCommand, with_cleanup
9
+ from pip._internal.cli.status_codes import SUCCESS
10
+ from pip._internal.req.req_tracker import get_requirement_tracker
11
+ from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
12
+ from pip._internal.utils.temp_dir import TempDirectory
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
class DownloadCommand(RequirementCommand):
    """
    Download packages from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports downloading from "requirements files", which provide
    an easy way to specify a whole environment to be downloaded.
    """

    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] <vcs project url> ...
      %prog [options] <local project path> ...
      %prog [options] <archive url/path> ..."""

    def add_options(self) -> None:
        # Shared option factories keep these flags consistent with
        # `pip install` / `pip wheel`.
        self.cmd_opts.add_option(cmdoptions.constraints())
        self.cmd_opts.add_option(cmdoptions.requirements())
        self.cmd_opts.add_option(cmdoptions.no_deps())
        self.cmd_opts.add_option(cmdoptions.global_options())
        self.cmd_opts.add_option(cmdoptions.no_binary())
        self.cmd_opts.add_option(cmdoptions.only_binary())
        self.cmd_opts.add_option(cmdoptions.prefer_binary())
        self.cmd_opts.add_option(cmdoptions.src())
        self.cmd_opts.add_option(cmdoptions.pre())
        self.cmd_opts.add_option(cmdoptions.require_hashes())
        self.cmd_opts.add_option(cmdoptions.progress_bar())
        self.cmd_opts.add_option(cmdoptions.no_build_isolation())
        self.cmd_opts.add_option(cmdoptions.use_pep517())
        self.cmd_opts.add_option(cmdoptions.no_use_pep517())
        self.cmd_opts.add_option(cmdoptions.ignore_requires_python())

        # Destination directory defaults to the current working directory.
        self.cmd_opts.add_option(
            "-d",
            "--dest",
            "--destination-dir",
            "--destination-directory",
            dest="download_dir",
            metavar="dir",
            default=os.curdir,
            help="Download packages into <dir>.",
        )

        # --platform/--python-version/--implementation/--abi selectors.
        cmdoptions.add_target_python_options(self.cmd_opts)

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, self.cmd_opts)

    @with_cleanup
    def run(self, options: Values, args: List[str]) -> int:
        # Download must re-fetch even if a matching dist is installed.
        options.ignore_installed = True
        # editable doesn't really make sense for `pip download`, but the bowels
        # of the RequirementSet code require that property.
        options.editables = []

        cmdoptions.check_dist_restriction(options)

        options.download_dir = normalize_path(options.download_dir)
        ensure_dir(options.download_dir)

        session = self.get_default_session(options)

        # Resolve for the (possibly cross-) target interpreter, not
        # necessarily the running one.
        target_python = make_target_python(options)
        finder = self._build_package_finder(
            options=options,
            session=session,
            target_python=target_python,
            ignore_requires_python=options.ignore_requires_python,
        )

        req_tracker = self.enter_context(get_requirement_tracker())

        # Scratch build directory; kept when --no-clean is passed.
        directory = TempDirectory(
            delete=not options.no_clean,
            kind="download",
            globally_managed=True,
        )

        reqs = self.get_requirements(args, options, finder, session)

        preparer = self.make_requirement_preparer(
            temp_build_dir=directory,
            options=options,
            req_tracker=req_tracker,
            session=session,
            finder=finder,
            download_dir=options.download_dir,
            use_user_site=False,
            verbosity=self.verbosity,
        )

        resolver = self.make_resolver(
            preparer=preparer,
            finder=finder,
            options=options,
            ignore_requires_python=options.ignore_requires_python,
            py_version_info=options.python_version,
        )

        self.trace_basic_info(finder)

        requirement_set = resolver.resolve(reqs, check_supported_wheels=True)

        # Save every requirement the resolver fetched (i.e. everything not
        # already satisfied) into the download directory.
        downloaded: List[str] = []
        for req in requirement_set.requirements.values():
            if req.satisfied_by is None:
                assert req.name is not None
                preparer.save_linked_requirement(req)
                downloaded.append(req.name)
        if downloaded:
            write_output("Successfully downloaded %s", " ".join(downloaded))

        return SUCCESS
llmeval-env/lib/python3.10/site-packages/pip/_internal/commands/search.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import shutil
3
+ import sys
4
+ import textwrap
5
+ import xmlrpc.client
6
+ from collections import OrderedDict
7
+ from optparse import Values
8
+ from typing import TYPE_CHECKING, Dict, List, Optional
9
+
10
+ from pip._vendor.packaging.version import parse as parse_version
11
+
12
+ from pip._internal.cli.base_command import Command
13
+ from pip._internal.cli.req_command import SessionCommandMixin
14
+ from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
15
+ from pip._internal.exceptions import CommandError
16
+ from pip._internal.metadata import get_default_environment
17
+ from pip._internal.models.index import PyPI
18
+ from pip._internal.network.xmlrpc import PipXmlrpcTransport
19
+ from pip._internal.utils.logging import indent_log
20
+ from pip._internal.utils.misc import write_output
21
+
22
if TYPE_CHECKING:
    from typing import TypedDict

    class TransformedHit(TypedDict):
        # Shape of one entry produced by transform_hits(): a single package
        # with every version from the raw PyPI hits collected inline.
        name: str
        summary: str
        versions: List[str]
29
+
30
+
31
+ logger = logging.getLogger(__name__)
32
+
33
+
34
class SearchCommand(Command, SessionCommandMixin):
    """Search for PyPI packages whose name or summary contains <query>."""

    usage = """
      %prog [options] <query>"""
    # Searching does not touch the active environment, so a virtualenv
    # requirement (PIP_REQUIRE_VIRTUALENV) is not enforced for this command.
    ignore_require_venv = True

    def add_options(self) -> None:
        self.cmd_opts.add_option(
            "-i",
            "--index",
            dest="index",
            metavar="URL",
            default=PyPI.pypi_url,
            help="Base URL of Python Package Index (default %default)",
        )

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        """Query the index, then pretty-print the matching packages.

        Returns SUCCESS when there was at least one hit, NO_MATCHES_FOUND
        otherwise. Raises CommandError when no query terms were given.
        """
        if not args:
            raise CommandError("Missing required argument (search query).")
        query = args
        pypi_hits = self.search(query, options)
        hits = transform_hits(pypi_hits)

        # Only wrap summaries to the terminal width when stdout is a tty;
        # piped output keeps each hit on a single line.
        terminal_width = None
        if sys.stdout.isatty():
            terminal_width = shutil.get_terminal_size()[0]

        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query: List[str], options: Values) -> List[Dict[str, str]]:
        """Run an XML-RPC search against the configured index.

        Each returned dict is one (package, version) hit with at least
        "name", "summary" and "version" keys. XML-RPC faults are converted
        to CommandError.
        """
        index_url = options.index

        session = self.get_default_session(options)

        # Route the XML-RPC call through pip's session so proxies, retries
        # and authentication settings apply.
        transport = PipXmlrpcTransport(index_url, session)
        pypi = xmlrpc.client.ServerProxy(index_url, transport)
        try:
            hits = pypi.search({"name": query, "summary": query}, "or")
        except xmlrpc.client.Fault as fault:
            message = "XMLRPC request failed [code: {code}]\n{string}".format(
                code=fault.faultCode,
                string=fault.faultString,
            )
            raise CommandError(message)
        assert isinstance(hits, list)
        return hits
86
+
87
+
88
def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]:
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.

    :param hits: raw (package, version) hits as returned by the XML-RPC
        search; each dict has "name", "summary" and "version" keys.
    :return: one entry per package, versions collected inline, in first-seen
        order.
    """
    packages: Dict[str, "TransformedHit"] = OrderedDict()
    for hit in hits:
        name = hit["name"]
        summary = hit["summary"]
        version = hit["version"]

        # Membership test directly on the dict (no redundant .keys() view).
        if name not in packages:
            packages[name] = {
                "name": name,
                "summary": summary,
                "versions": [version],
            }
        else:
            packages[name]["versions"].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]["versions"]):
                packages[name]["summary"] = summary

    return list(packages.values())
114
+
115
+
116
def print_dist_installation_info(name: str, latest: str) -> None:
    """Report the locally installed version of *name*, if any, and how it
    relates to *latest* (the newest version found on the index)."""
    dist = get_default_environment().get_distribution(name)
    if dist is None:
        return
    with indent_log():
        if dist.version == latest:
            write_output("INSTALLED: %s (latest)", dist.version)
            return
        write_output("INSTALLED: %s", dist.version)
        if parse_version(latest).pre:
            # Plain `pip install -U` would skip a pre-release; say so.
            write_output(
                "LATEST:    %s (pre-release; install"
                " with `pip install --pre`)",
                latest,
            )
        else:
            write_output("LATEST:    %s", latest)
133
+
134
+
135
def print_results(
    hits: List["TransformedHit"],
    name_column_width: Optional[int] = None,
    terminal_width: Optional[int] = None,
) -> None:
    """Write one "name (latest) - summary" line per hit, plus installation
    info for packages already installed locally.

    When *terminal_width* is given, long summaries are wrapped and indented
    to line up under the summary column.
    """
    if not hits:
        return
    if name_column_width is None:
        # Widest "name (version)" pair, plus padding for the parentheses/gap.
        widths = (
            len(hit["name"]) + len(highest_version(hit.get("versions", ["-"])))
            for hit in hits
        )
        name_column_width = max(widths) + 4

    for hit in hits:
        pkg_name = hit["name"]
        summary = hit["summary"] or ""
        latest = highest_version(hit.get("versions", ["-"]))
        if terminal_width is not None:
            target_width = terminal_width - name_column_width - 5
            if target_width > 10:
                # wrap and indent summary to fit terminal
                continuation = "\n" + " " * (name_column_width + 3)
                summary = continuation.join(textwrap.wrap(summary, target_width))

        line = "{:{}} - {}".format(
            f"{pkg_name} ({latest})", name_column_width, summary
        )
        try:
            write_output(line)
            print_dist_installation_info(pkg_name, latest)
        except UnicodeEncodeError:
            # Skip entries the active terminal encoding cannot represent.
            pass
171
+
172
+
173
def highest_version(versions: List[str]) -> str:
    """Return the largest entry of *versions* under PEP 440 version ordering.

    Note: on ties (strings that parse to equal versions), max() keeps the
    first maximal element.
    """
    return max(versions, key=parse_version)
llmeval-env/lib/python3.10/site-packages/pip/_internal/configuration.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Configuration management setup
2
+
3
+ Some terminology:
4
+ - name
5
+ As written in config files.
6
+ - value
7
+ Value associated with a name
8
+ - key
9
+ Name combined with it's section (section.name)
10
+ - variant
11
+ A single word describing where the configuration key-value pair came from
12
+ """
13
+
14
+ import configparser
15
+ import locale
16
+ import os
17
+ import sys
18
+ from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple
19
+
20
+ from pip._internal.exceptions import (
21
+ ConfigurationError,
22
+ ConfigurationFileCouldNotBeLoaded,
23
+ )
24
+ from pip._internal.utils import appdirs
25
+ from pip._internal.utils.compat import WINDOWS
26
+ from pip._internal.utils.logging import getLogger
27
+ from pip._internal.utils.misc import ensure_dir, enum
28
+
29
+ RawConfigParser = configparser.RawConfigParser # Shorthand
30
+ Kind = NewType("Kind", str)
31
+
32
+ CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf"
33
+ ENV_NAMES_IGNORED = "version", "help"
34
+
35
+ # The kinds of configurations there are.
36
+ kinds = enum(
37
+ USER="user", # User Specific
38
+ GLOBAL="global", # System Wide
39
+ SITE="site", # [Virtual] Environment Specific
40
+ ENV="env", # from PIP_CONFIG_FILE
41
+ ENV_VAR="env-var", # from Environment Variables
42
+ )
43
+ OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
44
+ VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE
45
+
46
+ logger = getLogger(__name__)
47
+
48
+
49
+ # NOTE: Maybe use the optionx attribute to normalize keynames.
50
+ def _normalize_name(name: str) -> str:
51
+ """Make a name consistent regardless of source (environment or file)"""
52
+ name = name.lower().replace("_", "-")
53
+ if name.startswith("--"):
54
+ name = name[2:] # only prefer long opts
55
+ return name
56
+
57
+
58
+ def _disassemble_key(name: str) -> List[str]:
59
+ if "." not in name:
60
+ error_message = (
61
+ "Key does not contain dot separated section and key. "
62
+ "Perhaps you wanted to use 'global.{}' instead?"
63
+ ).format(name)
64
+ raise ConfigurationError(error_message)
65
+ return name.split(".", 1)
66
+
67
+
68
def get_configuration_files() -> Dict[Kind, List[str]]:
    """Return candidate config file paths pip considers, grouped by variant.

    Files are listed lowest-priority first within each variant; later files
    override earlier ones when both exist.
    """
    global_config_files = [
        os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip")
    ]

    site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
    # Pre-appdirs location (~/.pip/pip.conf, or ~/pip/pip.ini on Windows),
    # kept for backwards compatibility.
    legacy_config_file = os.path.join(
        os.path.expanduser("~"),
        "pip" if WINDOWS else ".pip",
        CONFIG_BASENAME,
    )
    new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME)
    return {
        kinds.GLOBAL: global_config_files,
        kinds.SITE: [site_config_file],
        kinds.USER: [legacy_config_file, new_config_file],
    }
85
+
86
+
87
class Configuration:
    """Handles management of configuration.

    Provides an interface to accessing and managing configuration files.

    This class provides an API that takes "section.key-name" style
    keys and stores the value associated with it as "key-name" under the
    section "section".

    This allows for a clean interface wherein the both the section and the
    key-name are preserved in an easy to manage form in the configuration files
    and the data stored is also nice.
    """

    def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None:
        """
        :param isolated: when True, environment variables and per-user config
            files are ignored.
        :param load_only: restrict loading/editing to a single file variant;
            must be one of VALID_LOAD_ONLY (user/global/site) or None.
        """
        super().__init__()

        if load_only is not None and load_only not in VALID_LOAD_ONLY:
            raise ConfigurationError(
                "Got invalid value for load_only - should be one of {}".format(
                    ", ".join(map(repr, VALID_LOAD_ONLY))
                )
            )
        self.isolated = isolated
        self.load_only = load_only

        # Because we keep track of where we got the data from
        self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = {
            variant: [] for variant in OVERRIDE_ORDER
        }
        self._config: Dict[Kind, Dict[str, Any]] = {
            variant: {} for variant in OVERRIDE_ORDER
        }
        self._modified_parsers: List[Tuple[str, RawConfigParser]] = []

    def load(self) -> None:
        """Loads configuration from configuration files and environment"""
        self._load_config_files()
        if not self.isolated:
            self._load_environment_vars()

    def get_file_to_edit(self) -> Optional[str]:
        """Returns the file with highest priority in configuration"""
        assert self.load_only is not None, "Need to be specified a file to be editing"

        try:
            return self._get_parser_to_modify()[0]
        except IndexError:
            return None

    def items(self) -> Iterable[Tuple[str, Any]]:
        """Returns key-value pairs like dict.items() representing the loaded
        configuration
        """
        return self._dictionary.items()

    def get_value(self, key: str) -> Any:
        """Get a value from the configuration."""
        try:
            return self._dictionary[key]
        except KeyError:
            raise ConfigurationError(f"No such key - {key}")

    def set_value(self, key: str, value: Any) -> None:
        """Modify a value in the configuration."""
        self._ensure_have_load_only()

        assert self.load_only
        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)

            # Modify the parser and the configuration
            if not parser.has_section(section):
                parser.add_section(section)
            parser.set(section, name, value)

        # Keep the in-memory view in sync and remember to write this file
        # back out on save().
        self._config[self.load_only][key] = value
        self._mark_as_modified(fname, parser)

    def unset_value(self, key: str) -> None:
        """Unset a value in the configuration."""
        self._ensure_have_load_only()

        assert self.load_only
        if key not in self._config[self.load_only]:
            raise ConfigurationError(f"No such key - {key}")

        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)
            if not (
                parser.has_section(section) and parser.remove_option(section, name)
            ):
                # The option was not removed.
                raise ConfigurationError(
                    "Fatal Internal error [id=1]. Please report as a bug."
                )

            # The section may be empty after the option was removed.
            if not parser.items(section):
                parser.remove_section(section)
            self._mark_as_modified(fname, parser)

        del self._config[self.load_only][key]

    def save(self) -> None:
        """Save the current in-memory state."""
        self._ensure_have_load_only()

        for fname, parser in self._modified_parsers:
            logger.info("Writing to %s", fname)

            # Ensure directory exists.
            ensure_dir(os.path.dirname(fname))

            with open(fname, "w") as f:
                parser.write(f)

    #
    # Private routines
    #

    def _ensure_have_load_only(self) -> None:
        # Modifying operations only make sense against exactly one variant.
        if self.load_only is None:
            raise ConfigurationError("Needed a specific file to be modifying.")
        logger.debug("Will be working with %s variant only", self.load_only)

    @property
    def _dictionary(self) -> Dict[str, Any]:
        """A dictionary representing the loaded configuration."""
        # NOTE: Dictionaries are not populated if not loaded. So, conditionals
        # are not needed here.
        retval = {}

        # Later variants in OVERRIDE_ORDER win (env vars override files).
        for variant in OVERRIDE_ORDER:
            retval.update(self._config[variant])

        return retval

    def _load_config_files(self) -> None:
        """Loads configuration from configuration files"""
        config_files = dict(self.iter_config_files())
        if config_files[kinds.ENV][0:1] == [os.devnull]:
            logger.debug(
                "Skipping loading configuration files due to "
                "environment's PIP_CONFIG_FILE being os.devnull"
            )
            return

        for variant, files in config_files.items():
            for fname in files:
                # If there's specific variant set in `load_only`, load only
                # that variant, not the others.
                if self.load_only is not None and variant != self.load_only:
                    logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
                    continue

                parser = self._load_file(variant, fname)

                # Keeping track of the parsers used
                self._parsers[variant].append((fname, parser))

    def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
        # Parse one file and merge its sections into the variant's config.
        logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
        parser = self._construct_parser(fname)

        for section in parser.sections():
            items = parser.items(section)
            self._config[variant].update(self._normalized_keys(section, items))

        return parser

    def _construct_parser(self, fname: str) -> RawConfigParser:
        parser = configparser.RawConfigParser()
        # If there is no such file, don't bother reading it but create the
        # parser anyway, to hold the data.
        # Doing this is useful when modifying and saving files, where we don't
        # need to construct a parser.
        if os.path.exists(fname):
            locale_encoding = locale.getpreferredencoding(False)
            try:
                parser.read(fname, encoding=locale_encoding)
            except UnicodeDecodeError:
                # See https://github.com/pypa/pip/issues/4963
                raise ConfigurationFileCouldNotBeLoaded(
                    reason=f"contains invalid {locale_encoding} characters",
                    fname=fname,
                )
            except configparser.Error as error:
                # See https://github.com/pypa/pip/issues/4893
                raise ConfigurationFileCouldNotBeLoaded(error=error)
        return parser

    def _load_environment_vars(self) -> None:
        """Loads configuration from environment variables"""
        self._config[kinds.ENV_VAR].update(
            self._normalized_keys(":env:", self.get_environ_vars())
        )

    def _normalized_keys(
        self, section: str, items: Iterable[Tuple[str, Any]]
    ) -> Dict[str, Any]:
        """Normalizes items to construct a dictionary with normalized keys.

        This routine is where the names become keys and are made the same
        regardless of source - configuration files or environment.
        """
        normalized = {}
        for name, val in items:
            key = section + "." + _normalize_name(name)
            normalized[key] = val
        return normalized

    def get_environ_vars(self) -> Iterable[Tuple[str, str]]:
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if key.startswith("PIP_"):
                name = key[4:].lower()
                if name not in ENV_NAMES_IGNORED:
                    yield name, val

    # XXX: This is patched in the tests.
    def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]:
        """Yields variant and configuration files associated with it.

        This should be treated like items of a dictionary.

        Variants are yielded lowest-priority first (see OVERRIDE_ORDER).
        """
        # SMELL: Move the conditions out of this function

        # environment variables have the lowest priority
        config_file = os.environ.get("PIP_CONFIG_FILE", None)
        if config_file is not None:
            yield kinds.ENV, [config_file]
        else:
            yield kinds.ENV, []

        config_files = get_configuration_files()

        # at the base we have any global configuration
        yield kinds.GLOBAL, config_files[kinds.GLOBAL]

        # per-user configuration next
        should_load_user_config = not self.isolated and not (
            config_file and os.path.exists(config_file)
        )
        if should_load_user_config:
            # The legacy config file is overridden by the new config file
            yield kinds.USER, config_files[kinds.USER]

        # finally virtualenv configuration first trumping others
        yield kinds.SITE, config_files[kinds.SITE]

    def get_values_in_config(self, variant: Kind) -> Dict[str, Any]:
        """Get values present in a config file"""
        return self._config[variant]

    def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]:
        # Determine which parser to modify
        assert self.load_only
        parsers = self._parsers[self.load_only]
        if not parsers:
            # This should not happen if everything works correctly.
            raise ConfigurationError(
                "Fatal Internal error [id=2]. Please report as a bug."
            )

        # Use the highest priority parser.
        return parsers[-1]

    # XXX: This is patched in the tests.
    def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None:
        # Record (file, parser) once so save() writes each file at most once.
        file_parser_tuple = (fname, parser)
        if file_parser_tuple not in self._modified_parsers:
            self._modified_parsers.append(file_parser_tuple)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._dictionary!r})"
llmeval-env/lib/python3.10/site-packages/pip/_internal/index/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """Index interaction code
2
+ """
llmeval-env/lib/python3.10/site-packages/pip/_internal/index/collector.py ADDED
@@ -0,0 +1,648 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The main purpose of this module is to expose LinkCollector.collect_sources().
3
+ """
4
+
5
+ import cgi
6
+ import collections
7
+ import functools
8
+ import itertools
9
+ import logging
10
+ import os
11
+ import re
12
+ import urllib.parse
13
+ import urllib.request
14
+ import xml.etree.ElementTree
15
+ from html.parser import HTMLParser
16
+ from optparse import Values
17
+ from typing import (
18
+ TYPE_CHECKING,
19
+ Any,
20
+ Callable,
21
+ Dict,
22
+ Iterable,
23
+ List,
24
+ MutableMapping,
25
+ NamedTuple,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Union,
30
+ )
31
+
32
+ from pip._vendor import html5lib, requests
33
+ from pip._vendor.requests import Response
34
+ from pip._vendor.requests.exceptions import RetryError, SSLError
35
+
36
+ from pip._internal.exceptions import NetworkConnectionError
37
+ from pip._internal.models.link import Link
38
+ from pip._internal.models.search_scope import SearchScope
39
+ from pip._internal.network.session import PipSession
40
+ from pip._internal.network.utils import raise_for_status
41
+ from pip._internal.utils.deprecation import deprecated
42
+ from pip._internal.utils.filetypes import is_archive_file
43
+ from pip._internal.utils.misc import pairwise, redact_auth_from_url
44
+ from pip._internal.vcs import vcs
45
+
46
+ from .sources import CandidatesFromPage, LinkSource, build_source
47
+
48
+ if TYPE_CHECKING:
49
+ from typing import Protocol
50
+ else:
51
+ Protocol = object
52
+
53
+ logger = logging.getLogger(__name__)
54
+
55
+ HTMLElement = xml.etree.ElementTree.Element
56
+ ResponseHeaders = MutableMapping[str, str]
57
+
58
+
59
def _match_vcs_scheme(url: str) -> Optional[str]:
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    for scheme in vcs.schemes:
        # Length check guards the index access: a URL that is exactly the
        # scheme name (e.g. "git") would otherwise raise IndexError.
        if (
            url.lower().startswith(scheme)
            and len(url) > len(scheme)
            and url[len(scheme)] in "+:"
        ):
            return scheme
    return None
68
+
69
+
70
+ class _NotHTML(Exception):
71
+ def __init__(self, content_type: str, request_desc: str) -> None:
72
+ super().__init__(content_type, request_desc)
73
+ self.content_type = content_type
74
+ self.request_desc = request_desc
75
+
76
+
77
def _ensure_html_header(response: Response) -> None:
    """Check the Content-Type header to ensure the response contains HTML.

    Raises `_NotHTML` if the content type is not text/html.
    """
    content_type = response.headers.get("Content-Type", "")
    # Case-insensitive prefix match, so "text/html; charset=..." also passes.
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)
85
+
86
+
87
class _NotHTTP(Exception):
    # Raised by _ensure_html_response when the URL scheme is not http/https,
    # since a HEAD probe is only possible over HTTP(S).
    pass
89
+
90
+
91
def _ensure_html_response(url: str, session: PipSession) -> None:
    """Send a HEAD request to the URL, and ensure the response contains HTML.

    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
    `_NotHTML` if the content type is not text/html.
    """
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
    if scheme not in {"http", "https"}:
        raise _NotHTTP()

    # Follow redirects so the headers checked are those of the final target.
    resp = session.head(url, allow_redirects=True)
    raise_for_status(resp)

    _ensure_html_header(resp)
105
+
106
+
107
def _get_html_response(url: str, session: PipSession) -> Response:
    """Access an HTML page with GET, and return the response.

    This consists of three parts:

    1. If the URL looks suspiciously like an archive, send a HEAD first to
       check the Content-Type is HTML, to avoid downloading a large file.
       Raise `_NotHTTP` if the content type cannot be determined, or
       `_NotHTML` if it is not HTML.
    2. Actually perform the request. Raise HTTP exceptions on network failures.
    3. Check the Content-Type header to make sure we got HTML, and raise
       `_NotHTML` otherwise.

    :param url: the index page URL to fetch.
    :param session: pip's configured requests session (retries, auth, proxies).
    """
    if is_archive_file(Link(url).filename):
        _ensure_html_response(url, session=session)

    # Credentials are stripped from the URL before logging.
    logger.debug("Getting page %s", redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly returned cached data for
            # /simple/, because authors generally expecting that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    raise_for_status(resp)

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is HTML
    # or not. However we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp
155
+
156
+
157
def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
    """Determine if we have any encoding information in our headers.

    Returns the "charset" parameter of the Content-Type header when present,
    otherwise None.
    """
    if headers and "Content-Type" in headers:
        # NOTE(review): cgi.parse_header is deprecated (PEP 594, removed in
        # Python 3.13); an email.message-based parse will be needed eventually.
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params["charset"]
    return None
164
+
165
+
166
def _determine_base_url(document: HTMLElement, page_url: str) -> str:
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its href
    attribute denotes the base URL of anchor tags in the document. If there is
    no such tag (or if it does not have a valid href attribute), the HTML
    file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.

    TODO: Remove when `html5lib` is dropped.
    """
    # First <base> element carrying an href wins; fall back to the page URL.
    hrefs = (base.get("href") for base in document.findall(".//base"))
    for href in hrefs:
        if href is not None:
            return href
    return page_url
185
+
186
+
187
+ def _clean_url_path_part(part: str) -> str:
188
+ """
189
+ Clean a "part" of a URL path (i.e. after splitting on "@" characters).
190
+ """
191
+ # We unquote prior to quoting to make sure nothing is double quoted.
192
+ return urllib.parse.quote(urllib.parse.unquote(part))
193
+
194
+
195
def _clean_file_url_path(part: str) -> str:
    """
    Clean the first part of a URL path that corresponds to a local
    filesystem path (i.e. the first part after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    # Also, on Windows the path part might contain a drive letter which
    # should not be quoted. On Linux where drive letters do not
    # exist, the colon should be quoted. We rely on urllib.request
    # to do the right thing here.
    return urllib.request.pathname2url(urllib.request.url2pathname(part))
206
+
207
+
208
+ # percent-encoded: /
209
+ _reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
210
+
211
+
212
def _clean_url_path(path: str, is_local_path: bool) -> str:
    """
    Clean the path portion of a URL.

    :param is_local_path: whether the URL has an empty netloc and therefore
        refers to a local filesystem path; selects the file-aware cleaner.
    """
    if is_local_path:
        clean_func = _clean_file_url_path
    else:
        clean_func = _clean_url_path_part

    # Split on the reserved characters prior to cleaning so that
    # revision strings in VCS URLs are properly preserved.
    parts = _reserved_chars_re.split(path)

    # The split alternates text and captured separators; the trailing ""
    # pads the sequence so pip's pairwise yields complete
    # (text, separator) pairs.
    cleaned_parts = []
    for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
        cleaned_parts.append(clean_func(to_clean))
        # Normalize %xx escapes (e.g. %2f -> %2F)
        cleaned_parts.append(reserved.upper())

    return "".join(cleaned_parts)
232
+
233
+
234
def _clean_link(url: str) -> str:
    """
    Make sure a link is fully quoted.
    For example, if ' ' occurs in the URL, it will be replaced with "%20",
    and without double-quoting other characters.
    """
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`.
    parsed = urllib.parse.urlparse(url)
    # An empty netloc means the URL refers to a local filesystem path.
    cleaned_path = _clean_url_path(parsed.path, is_local_path=not parsed.netloc)
    return urllib.parse.urlunparse(parsed._replace(path=cleaned_path))
247
+
248
+
249
def _create_link_from_element(
    element_attribs: Dict[str, Optional[str]],
    page_url: str,
    base_url: str,
) -> Optional[Link]:
    """
    Convert an anchor element's attributes in a simple repository page to a Link.

    :param element_attribs: the <a> tag's attribute mapping.
    :param page_url: URL of the page the anchor was found on (recorded as the
        link's origin).
    :param base_url: base URL to resolve relative hrefs against.
    :return: a Link, or None when the anchor has no (non-empty) href.
    """
    href = element_attribs.get("href")
    if not href:
        return None

    url = _clean_link(urllib.parse.urljoin(base_url, href))
    # PEP 503 data attributes: requires-python constraint and yanked marker.
    pyrequire = element_attribs.get("data-requires-python")
    yanked_reason = element_attribs.get("data-yanked")

    link = Link(
        url,
        comes_from=page_url,
        requires_python=pyrequire,
        yanked_reason=yanked_reason,
    )

    return link
273
+
274
+
275
class CacheablePageContent:
    """Hashable wrapper that lets an HTMLPage serve as an lru_cache key,
    with identity defined by the page's URL."""

    def __init__(self, page: "HTMLPage") -> None:
        # Only pages that opted into link-parse caching may be wrapped.
        assert page.cache_link_parsing
        self.page = page

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, type(self)):
            return False
        return self.page.url == other.page.url

    def __hash__(self) -> int:
        return hash(self.page.url)
285
+
286
+
287
class ParseLinks(Protocol):
    # Callback protocol describing the shared signature of parse_links and
    # the wrappers created by with_cached_html_pages (typing aid only).
    def __call__(
        self, page: "HTMLPage", use_deprecated_html5lib: bool
    ) -> Iterable[Link]:
        ...
292
+
293
+
294
def with_cached_html_pages(fn: ParseLinks) -> ParseLinks:
    """
    Given a function that parses an Iterable[Link] from an HTMLPage, cache the
    function's result (keyed by CacheablePageContent), unless the HTMLPage
    `page` has `page.cache_link_parsing == False`.
    """

    # Unbounded cache keyed on CacheablePageContent (i.e. on the page URL).
    # The result is materialized to a list so it can be returned repeatedly.
    @functools.lru_cache(maxsize=None)
    def wrapper(
        cacheable_page: CacheablePageContent, use_deprecated_html5lib: bool
    ) -> List[Link]:
        return list(fn(cacheable_page.page, use_deprecated_html5lib))

    @functools.wraps(fn)
    def wrapper_wrapper(page: "HTMLPage", use_deprecated_html5lib: bool) -> List[Link]:
        # Pages that opt out of caching are parsed fresh on every call.
        if page.cache_link_parsing:
            return wrapper(CacheablePageContent(page), use_deprecated_html5lib)
        return list(fn(page, use_deprecated_html5lib))

    return wrapper_wrapper
314
+
315
+
316
def _parse_links_html5lib(page: "HTMLPage") -> Iterable[Link]:
    """
    Yield the anchor elements of an HTML document as Link objects, using
    the (deprecated) html5lib parser.

    TODO: Remove when `html5lib` is dropped.
    """
    document = html5lib.parse(
        page.content,
        transport_encoding=page.encoding,
        namespaceHTMLElements=False,
    )

    base_url = _determine_base_url(document, page.url)
    for anchor in document.findall(".//a"):
        link = _create_link_from_element(
            anchor.attrib,
            page_url=page.url,
            base_url=base_url,
        )
        if link is not None:
            yield link
339
+
340
+
341
@with_cached_html_pages
def parse_links(page: "HTMLPage", use_deprecated_html5lib: bool) -> Iterable[Link]:
    """
    Yield the anchor elements of an HTML document as Link objects.
    """
    encoding = page.encoding or "utf-8"

    # Choose the parser: unless html5lib was explicitly requested, require
    # the document to open with a valid HTML 5 doctype; otherwise warn and
    # fall back to the deprecated html5lib parser.
    if not use_deprecated_html5lib:
        expected_doctype = "<!doctype html>".encode(encoding)
        actual_start = page.content[: len(expected_doctype)]
        if actual_start.decode(encoding).lower() != "<!doctype html>":
            deprecated(
                reason=(
                    f"The HTML index page being used ({page.url}) is not a proper "
                    "HTML 5 document. This is in violation of PEP 503 which requires "
                    "these pages to be well-formed HTML 5 documents. Please reach out "
                    "to the owners of this index page, and ask them to update this "
                    "index page to a valid HTML 5 document."
                ),
                replacement=None,
                gone_in="22.2",
                issue=10825,
            )
            use_deprecated_html5lib = True

    if use_deprecated_html5lib:
        yield from _parse_links_html5lib(page)
        return

    parser = HTMLLinkParser()
    parser.feed(page.content.decode(encoding))

    base_url = parser.base_url or page.url
    for anchor in parser.anchors:
        link = _create_link_from_element(
            anchor,
            page_url=page.url,
            base_url=base_url,
        )
        if link is not None:
            yield link
387
+
388
+
389
class HTMLPage:
    """One fetched index page: raw bytes, declared encoding, and source URL."""

    def __init__(
        self,
        content: bytes,
        encoding: Optional[str],
        url: str,
        cache_link_parsing: bool = True,
    ) -> None:
        """
        :param content: the raw bytes of the downloaded document.
        :param encoding: the encoding to decode the given content.
        :param url: the URL from which the HTML was downloaded.
        :param cache_link_parsing: whether links parsed from this page's url
            should be cached. PyPI index urls should have this set to False,
            for example.
        """
        self.url = url
        self.content = content
        self.encoding = encoding
        self.cache_link_parsing = cache_link_parsing

    def __str__(self) -> str:
        # Never expose credentials embedded in the URL when logging.
        return redact_auth_from_url(self.url)
413
+
414
+
415
class HTMLLinkParser(HTMLParser):
    """
    HTMLParser that records the first <base href> seen and the attributes
    of every anchor element, rejecting documents that lack an HTML 5
    doctype declaration.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self._seen_decl = False
        self.base_url: Optional[str] = None
        self.anchors: List[Dict[str, Optional[str]]] = []

    def handle_decl(self, decl: str) -> None:
        # The only declaration acceptable on a simple-index page is
        # "<!DOCTYPE html>" (case-insensitive).
        if decl.lower() != "doctype html":
            self._raise_error()
        self._seen_decl = True

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
        # Any tag appearing before the declaration means it is missing.
        if not self._seen_decl:
            self._raise_error()

        if tag == "a":
            self.anchors.append(dict(attrs))
        elif tag == "base" and self.base_url is None:
            href = self.get_href(attrs)
            if href is not None:
                self.base_url = href

    def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
        return next((value for name, value in attrs if name == "href"), None)

    def _raise_error(self) -> None:
        raise ValueError(
            "HTML doctype missing or incorrect. Expected <!DOCTYPE html>.\n\n"
            "If you believe this error to be incorrect, try passing the "
            "command line option --use-deprecated=html5lib and please leave "
            "a comment on the pip issue at https://github.com/pypa/pip/issues/10825."
        )
456
+
457
+
458
def _handle_get_page_fail(
    link: Link,
    reason: Union[str, Exception],
    meth: Optional[Callable[..., None]] = None,
) -> None:
    """Log that fetching `link` failed, via `meth` (defaults to logger.debug)."""
    log = meth if meth is not None else logger.debug
    log("Could not fetch URL %s: %s - skipping", link, reason)
466
+
467
+
468
def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTMLPage:
    """Wrap an HTTP response as an HTMLPage, using the header-declared encoding."""
    return HTMLPage(
        response.content,
        encoding=_get_encoding_from_headers(response.headers),
        url=response.url,
        cache_link_parsing=cache_link_parsing,
    )
476
+
477
+
478
def _get_html_page(
    link: Link, session: Optional[PipSession] = None
) -> Optional["HTMLPage"]:
    """Fetch the index page behind ``link``, or return None if unusable.

    Returns None (after logging) for VCS URLs, archive/non-HTML responses,
    and network failures. ``session`` is required in practice; the Optional
    default exists only to produce a clearer TypeError message.
    """
    if session is None:
        raise TypeError(
            "_get_html_page() missing 1 required keyword argument: 'session'"
        )

    # Drop any URL fragment before fetching.
    url = link.url.split("#", 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning(
            "Cannot look at %s URL %s because it does not support lookup as web pages.",
            vcs_scheme,
            link,
        )
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
    if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith("/"):
            url += "/"
        url = urllib.parse.urljoin(url, "index.html")
        logger.debug(" file: URL is directory, getting %s", url)

    try:
        resp = _get_html_response(url, session=session)
    except _NotHTTP:
        logger.warning(
            "Skipping page %s because it looks like an archive, and cannot "
            "be checked by a HTTP HEAD request.",
            link,
        )
    except _NotHTML as exc:
        logger.warning(
            "Skipping page %s because the %s request got Content-Type: %s."
            "The only supported Content-Type is text/html",
            link,
            exc.request_desc,
            exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_page_fail(link, exc)
    except RetryError as exc:
        _handle_get_page_fail(link, exc)
    except SSLError as exc:
        # NOTE: keep this clause before requests.ConnectionError —
        # requests' SSLError is a subclass of its ConnectionError, and the
        # SSL case gets a more specific, higher-visibility (info) message.
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_page_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_page_fail(link, f"connection error: {exc}")
    except requests.Timeout:
        _handle_get_page_fail(link, "timed out")
    else:
        # Success: wrap the response, honoring the link's caching policy.
        return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing)
    # All failure branches fall through to None.
    return None
539
+
540
+
541
class CollectedSources(NamedTuple):
    """Link sources gathered by LinkCollector.collect_sources().

    Per the element type, entries may be None for locations that did not
    yield a usable source.
    """

    find_links: Sequence[Optional[LinkSource]]
    index_urls: Sequence[Optional[LinkSource]]
544
+
545
+
546
class LinkCollector:

    """
    Responsible for collecting Link objects from all configured locations,
    making network requests as needed.

    The class's main method is its collect_sources() method.
    """

    def __init__(
        self,
        session: PipSession,
        search_scope: SearchScope,
    ) -> None:
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(
        cls,
        session: PipSession,
        options: Values,
        suppress_no_index: bool = False,
    ) -> "LinkCollector":
        """
        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            logger.debug(
                "Ignoring indexes: %s",
                ",".join(redact_auth_from_url(url) for url in index_urls),
            )
            index_urls = []

        # options.find_links may be None; normalize to a list for create().
        find_links = options.find_links or []

        search_scope = SearchScope.create(
            find_links=find_links,
            index_urls=index_urls,
        )
        return cls(
            session=session,
            search_scope=search_scope,
        )

    @property
    def find_links(self) -> List[str]:
        return self.search_scope.find_links

    def fetch_page(self, location: Link) -> Optional[HTMLPage]:
        """
        Fetch an HTML page containing package links.
        """
        return _get_html_page(location, session=self.session)

    def collect_sources(
        self,
        project_name: str,
        candidates_from_page: CandidatesFromPage,
    ) -> CollectedSources:
        """Build the deduplicated link sources for `project_name`."""

        def _deduped(locations, *, expand_dir: bool, cache_link_parsing: bool):
            # An OrderedDict keyed by URL deduplicates while keeping order.
            return collections.OrderedDict(
                build_source(
                    loc,
                    candidates_from_page=candidates_from_page,
                    page_validator=self.session.is_secure_origin,
                    expand_dir=expand_dir,
                    cache_link_parsing=cache_link_parsing,
                )
                for loc in locations
            ).values()

        index_url_sources = _deduped(
            self.search_scope.get_index_urls_locations(project_name),
            expand_dir=False,
            cache_link_parsing=False,
        )
        find_links_sources = _deduped(
            self.find_links,
            expand_dir=True,
            cache_link_parsing=True,
        )

        if logger.isEnabledFor(logging.DEBUG):
            lines = [
                f"* {s.link}"
                for s in itertools.chain(find_links_sources, index_url_sources)
                if s is not None and s.link is not None
            ]
            header = (
                f"{len(lines)} location(s) to search "
                f"for versions of {project_name}:"
            )
            logger.debug("\n".join([header] + lines))

        return CollectedSources(
            find_links=list(find_links_sources),
            index_urls=list(index_url_sources),
        )
llmeval-env/lib/python3.10/site-packages/pip/_internal/index/package_finder.py ADDED
@@ -0,0 +1,1004 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Routines related to PyPI, indexes"""
2
+
3
+ # The following comment should be removed at some point in the future.
4
+ # mypy: strict-optional=False
5
+
6
+ import functools
7
+ import itertools
8
+ import logging
9
+ import re
10
+ from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
11
+
12
+ from pip._vendor.packaging import specifiers
13
+ from pip._vendor.packaging.tags import Tag
14
+ from pip._vendor.packaging.utils import canonicalize_name
15
+ from pip._vendor.packaging.version import _BaseVersion
16
+ from pip._vendor.packaging.version import parse as parse_version
17
+
18
+ from pip._internal.exceptions import (
19
+ BestVersionAlreadyInstalled,
20
+ DistributionNotFound,
21
+ InvalidWheelFilename,
22
+ UnsupportedWheel,
23
+ )
24
+ from pip._internal.index.collector import LinkCollector, parse_links
25
+ from pip._internal.models.candidate import InstallationCandidate
26
+ from pip._internal.models.format_control import FormatControl
27
+ from pip._internal.models.link import Link
28
+ from pip._internal.models.search_scope import SearchScope
29
+ from pip._internal.models.selection_prefs import SelectionPreferences
30
+ from pip._internal.models.target_python import TargetPython
31
+ from pip._internal.models.wheel import Wheel
32
+ from pip._internal.req import InstallRequirement
33
+ from pip._internal.utils._log import getLogger
34
+ from pip._internal.utils.filetypes import WHEEL_EXTENSION
35
+ from pip._internal.utils.hashes import Hashes
36
+ from pip._internal.utils.logging import indent_log
37
+ from pip._internal.utils.misc import build_netloc
38
+ from pip._internal.utils.packaging import check_requires_python
39
+ from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
40
+
41
+ __all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
42
+
43
+
44
+ logger = getLogger(__name__)
45
+
46
+ BuildTag = Union[Tuple[()], Tuple[int, str]]
47
+ CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
48
+
49
+
50
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python,
            version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # An unparsable specifier is treated as compatible, but logged.
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python,
            link,
        )
        return True

    if is_compatible:
        return True

    version = ".".join(map(str, version_info))
    if not ignore_requires_python:
        logger.verbose(
            "Link requires a different Python (%s not in: %r): %s",
            version,
            link.requires_python,
            link,
        )
        return False

    # Incompatible, but the caller asked us to ignore that.
    logger.debug(
        "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
        version,
        link.requires_python,
        link,
    )
    return True
95
+
96
+
97
class LinkEvaluator:

    """
    Responsible for evaluating links for a particular project.
    """

    # Matches a "-pyX[.Y]" suffix (e.g. "-py3.10") some filenames embed
    # after the version to declare a Python version.
    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name: str,
        canonical_name: str,
        formats: FrozenSet[str],
        target_python: TargetPython,
        allow_yanked: bool,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False

        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python

        self.project_name = project_name

    def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]:
        """
        Determine whether a link is a candidate for installation.

        :return: A tuple (is_candidate, result), where `result` is (1) a
            version string if `is_candidate` is True, and (2) if
            `is_candidate` is False, an optional string to log the reason
            the link fails to qualify.
        """
        version = None
        # Yanked releases are rejected up front unless explicitly allowed.
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or "<none given>"
            return (False, f"yanked for reason: {reason}")

        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (False, "not a file")
            if ext not in SUPPORTED_EXTENSIONS:
                return (False, f"unsupported archive format: {ext}")
        if "binary" not in self._formats and ext == WHEEL_EXTENSION:
            reason = "No binaries permitted for {}".format(self.project_name)
            return (False, reason)
        if "macosx10" in link.path and ext == ".zip":
            return (False, "macosx10 one")
        if ext == WHEEL_EXTENSION:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                return (False, "invalid wheel filename")
            if canonicalize_name(wheel.name) != self._canonical_name:
                reason = "wrong project name (not {})".format(self.project_name)
                return (False, reason)

            supported_tags = self._target_python.get_tags()
            if not wheel.supported(supported_tags):
                # Include the wheel's tags in the reason string to
                # simplify troubleshooting compatibility issues.
                file_tags = wheel.get_formatted_file_tags()
                reason = (
                    "none of the wheel's tags ({}) are compatible "
                    "(run pip debug --verbose to show compatible tags)".format(
                        ", ".join(file_tags)
                    )
                )
                return (False, reason)

            version = wheel.version

        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f"No sources permitted for {self.project_name}"
            return (False, reason)

        # For non-wheel links, the version comes from the filename/fragment.
        if not version:
            version = _extract_version_from_fragment(
                egg_info,
                self._canonical_name,
            )
        if not version:
            reason = f"Missing project version for {self.project_name}"
            return (False, reason)

        # Strip a trailing "-pyX.Y" marker and check it against the target.
        match = self._py_version_re.search(version)
        if match:
            version = version[: match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (False, "Python version is incorrect")

        supports_python = _check_link_requires_python(
            link,
            version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            # Return None for the reason text to suppress calling
            # _log_skipped_link().
            return (False, None)

        logger.debug("Found link %s, version: %s", link, version)

        return (True, version)
232
+
233
+
234
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Hashes,
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned. Otherwise,
    the given candidates are returned.

    Including the candidates with no hash specified when there is a match
    allows a warning to be logged if there is a more preferred candidate
    with no hash specified. Returning all candidates in the case of no
    matches lets pip report the hash of the candidate that would otherwise
    have been installed (e.g. permitting the user to more easily update
    their requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)

    matches_or_no_digest = []
    # Collect the non-matches for logging purposes.
    non_matches = []
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if link.has_hash and not link.is_hash_allowed(hashes=hashes):
            non_matches.append(candidate)
            continue
        if link.has_hash:
            match_count += 1
        matches_or_no_digest.append(candidate)

    # Only restrict the result when at least one hash actually matched;
    # otherwise return a copy of the input (never the input itself).
    filtered = matches_or_no_digest if match_count else list(candidates)

    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discard_message = "discarding {} non-matches:\n {}".format(
            len(non_matches),
            "\n ".join(str(candidate.link) for candidate in non_matches),
        )

    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )

    return filtered
306
+
307
+
308
class CandidatePreferences:

    """
    Encapsulates some of the preferences for filtering and sorting
    InstallationCandidate objects.
    """

    def __init__(
        self,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
    ) -> None:
        """
        :param prefer_binary: Whether wheels should sort above sdists.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
325
+
326
+
327
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """

    def __init__(
        self,
        candidates: List[InstallationCandidate],
        applicable_candidates: List[InstallationCandidate],
        best_candidate: Optional[InstallationCandidate],
    ) -> None:
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Invariants: applicable candidates are a subset of all candidates,
        # and the best candidate (when present) is one of the applicable.
        assert set(applicable_candidates) <= set(candidates)
        if best_candidate is None:
            assert not applicable_candidates
        else:
            assert best_candidate in applicable_candidates

        self._candidates = candidates
        self._applicable_candidates = applicable_candidates

        self.best_candidate = best_candidate

    def iter_all(self) -> Iterable[InstallationCandidate]:
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self) -> Iterable[InstallationCandidate]:
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)
365
+
366
+
367
class CandidateEvaluator:

    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            specifier = specifiers.SpecifierSet()

        supported_tags = target_python.get_tags()

        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )

    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }

    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v)
            for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }

        # Again, converting version to str to deal with debundling.
        applicable_candidates = [c for c in candidates if str(c.version) in versions]

        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )

        return sorted(filtered_applicable_candidates, key=self._sort_key)

    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows:

        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.

        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:

        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.

        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                # Negate the tag index so that a more-preferred (lower
                # index) tag yields a greater (more preferred) key value.
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            if wheel.build_tag is not None:
                # Split a build tag like "12abc" into (12, "abc") so build
                # numbers compare numerically.
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            # Rank sdists below every supported wheel tag.
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )

    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate

    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)

        best_candidate = self.sort_best_candidate(applicable_candidates)

        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
+ )
569
+
570
+
571
+ class PackageFinder:
572
+ """This finds packages.
573
+
574
+ This is meant to match easy_install's technique for looking for
575
+ packages, by reading pages and looking for appropriate links.
576
+ """
577
+
578
    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        use_deprecated_html5lib: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param link_collector: Fetches index pages and find-links sources.
        :param target_python: The interpreter to check compatibility against.
        :param allow_yanked: Whether yanked (PEP 592) files may be used.
        :param use_deprecated_html5lib: Parser selection flag forwarded to
            ``parse_links`` when processing project pages.
        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        :param ignore_requires_python: Whether to ignore Requires-Python
            metadata when evaluating links.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()

        # Default to a permissive FormatControl (no formats disallowed)
        # rather than a mutable default argument.
        format_control = format_control or FormatControl(set(), set())

        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python
        self._use_deprecated_html5lib = use_deprecated_html5lib

        self.format_control = format_control

        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Link] = set()
614
+
615
+ # Don't include an allow_yanked default value to make sure each call
616
+ # site considers whether yanked releases are allowed. This also causes
617
+ # that decision to be made explicit in the calling code, which helps
618
+ # people when reading the code.
619
+ @classmethod
620
+ def create(
621
+ cls,
622
+ link_collector: LinkCollector,
623
+ selection_prefs: SelectionPreferences,
624
+ target_python: Optional[TargetPython] = None,
625
+ *,
626
+ use_deprecated_html5lib: bool,
627
+ ) -> "PackageFinder":
628
+ """Create a PackageFinder.
629
+
630
+ :param selection_prefs: The candidate selection preferences, as a
631
+ SelectionPreferences object.
632
+ :param target_python: The target Python interpreter to use when
633
+ checking compatibility. If None (the default), a TargetPython
634
+ object will be constructed from the running Python.
635
+ """
636
+ if target_python is None:
637
+ target_python = TargetPython()
638
+
639
+ candidate_prefs = CandidatePreferences(
640
+ prefer_binary=selection_prefs.prefer_binary,
641
+ allow_all_prereleases=selection_prefs.allow_all_prereleases,
642
+ )
643
+
644
+ return cls(
645
+ candidate_prefs=candidate_prefs,
646
+ link_collector=link_collector,
647
+ target_python=target_python,
648
+ allow_yanked=selection_prefs.allow_yanked,
649
+ format_control=selection_prefs.format_control,
650
+ ignore_requires_python=selection_prefs.ignore_requires_python,
651
+ use_deprecated_html5lib=use_deprecated_html5lib,
652
+ )
653
+
654
    # --- Thin accessors over the collaborators held by this finder. ---

    @property
    def target_python(self) -> TargetPython:
        return self._target_python

    @property
    def search_scope(self) -> SearchScope:
        # Delegated to the link collector, which owns the scope.
        return self._link_collector.search_scope

    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        self._link_collector.search_scope = search_scope

    @property
    def find_links(self) -> List[str]:
        return self._link_collector.find_links

    @property
    def index_urls(self) -> List[str]:
        return self.search_scope.index_urls

    @property
    def trusted_hosts(self) -> Iterable[str]:
        # Each trusted origin is a (host, port) pair; render it as "host:port".
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)

    @property
    def allow_all_prereleases(self) -> bool:
        return self._candidate_prefs.allow_all_prereleases

    def set_allow_all_prereleases(self) -> None:
        # One-way switch: preferences can only be loosened, not re-tightened.
        self._candidate_prefs.allow_all_prereleases = True

    @property
    def prefer_binary(self) -> bool:
        return self._candidate_prefs.prefer_binary

    def set_prefer_binary(self) -> None:
        # One-way switch, mirroring set_allow_all_prereleases above.
        self._candidate_prefs.prefer_binary = True
692
+
693
+ def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
694
+ canonical_name = canonicalize_name(project_name)
695
+ formats = self.format_control.get_allowed_formats(canonical_name)
696
+
697
+ return LinkEvaluator(
698
+ project_name=project_name,
699
+ canonical_name=canonical_name,
700
+ formats=formats,
701
+ target_python=self._target_python,
702
+ allow_yanked=self._allow_yanked,
703
+ ignore_requires_python=self._ignore_requires_python,
704
+ )
705
+
706
+ def _sort_links(self, links: Iterable[Link]) -> List[Link]:
707
+ """
708
+ Returns elements of links in order, non-egg links first, egg links
709
+ second, while eliminating duplicates
710
+ """
711
+ eggs, no_eggs = [], []
712
+ seen: Set[Link] = set()
713
+ for link in links:
714
+ if link not in seen:
715
+ seen.add(link)
716
+ if link.egg_fragment:
717
+ eggs.append(link)
718
+ else:
719
+ no_eggs.append(link)
720
+ return no_eggs + eggs
721
+
722
+ def _log_skipped_link(self, link: Link, reason: str) -> None:
723
+ if link not in self._logged_links:
724
+ # Put the link at the end so the reason is more visible and because
725
+ # the link string is usually very long.
726
+ logger.debug("Skipping link: %s: %s", reason, link)
727
+ self._logged_links.add(link)
728
+
729
+ def get_install_candidate(
730
+ self, link_evaluator: LinkEvaluator, link: Link
731
+ ) -> Optional[InstallationCandidate]:
732
+ """
733
+ If the link is a candidate for install, convert it to an
734
+ InstallationCandidate and return it. Otherwise, return None.
735
+ """
736
+ is_candidate, result = link_evaluator.evaluate_link(link)
737
+ if not is_candidate:
738
+ if result:
739
+ self._log_skipped_link(link, reason=result)
740
+ return None
741
+
742
+ return InstallationCandidate(
743
+ name=link_evaluator.project_name,
744
+ link=link,
745
+ version=result,
746
+ )
747
+
748
+ def evaluate_links(
749
+ self, link_evaluator: LinkEvaluator, links: Iterable[Link]
750
+ ) -> List[InstallationCandidate]:
751
+ """
752
+ Convert links that are candidates to InstallationCandidate objects.
753
+ """
754
+ candidates = []
755
+ for link in self._sort_links(links):
756
+ candidate = self.get_install_candidate(link_evaluator, link)
757
+ if candidate is not None:
758
+ candidates.append(candidate)
759
+
760
+ return candidates
761
+
762
+ def process_project_url(
763
+ self, project_url: Link, link_evaluator: LinkEvaluator
764
+ ) -> List[InstallationCandidate]:
765
+ logger.debug(
766
+ "Fetching project page and analyzing links: %s",
767
+ project_url,
768
+ )
769
+ html_page = self._link_collector.fetch_page(project_url)
770
+ if html_page is None:
771
+ return []
772
+
773
+ page_links = list(parse_links(html_page, self._use_deprecated_html5lib))
774
+
775
+ with indent_log():
776
+ package_links = self.evaluate_links(
777
+ link_evaluator,
778
+ links=page_links,
779
+ )
780
+
781
+ return package_links
782
+
783
    # NOTE(review): lru_cache on an instance method keys on ``self`` and
    # keeps the finder alive for the cache's lifetime; presumably acceptable
    # here because finders are long-lived — confirm before reusing the pattern.
    @functools.lru_cache(maxsize=None)
    def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        link_evaluator = self.make_link_evaluator(project_name)

        # Each source can contribute index pages and/or direct file links.
        collected_sources = self._link_collector.collect_sources(
            project_name=project_name,
            candidates_from_page=functools.partial(
                self.process_project_url,
                link_evaluator=link_evaluator,
            ),
        )

        # Candidates parsed from index pages (network fetch happens here).
        page_candidates_it = itertools.chain.from_iterable(
            source.page_candidates()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        page_candidates = list(page_candidates_it)

        # Direct archive links (e.g. from flat --find-links directories).
        file_links_it = itertools.chain.from_iterable(
            source.file_links()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        file_candidates = self.evaluate_links(
            link_evaluator,
            sorted(file_links_it, reverse=True),
        )

        if logger.isEnabledFor(logging.DEBUG) and file_candidates:
            paths = []
            for candidate in file_candidates:
                assert candidate.link.url  # we need to have a URL
                try:
                    paths.append(candidate.link.file_path)
                except Exception:
                    paths.append(candidate.link.url)  # it's not a local file

            logger.debug("Local files found: %s", ", ".join(paths))

        # This is an intentional priority ordering
        return file_candidates + page_candidates
835
+
836
+ def make_candidate_evaluator(
837
+ self,
838
+ project_name: str,
839
+ specifier: Optional[specifiers.BaseSpecifier] = None,
840
+ hashes: Optional[Hashes] = None,
841
+ ) -> CandidateEvaluator:
842
+ """Create a CandidateEvaluator object to use."""
843
+ candidate_prefs = self._candidate_prefs
844
+ return CandidateEvaluator.create(
845
+ project_name=project_name,
846
+ target_python=self._target_python,
847
+ prefer_binary=candidate_prefs.prefer_binary,
848
+ allow_all_prereleases=candidate_prefs.allow_all_prereleases,
849
+ specifier=specifier,
850
+ hashes=hashes,
851
+ )
852
+
853
+ @functools.lru_cache(maxsize=None)
854
+ def find_best_candidate(
855
+ self,
856
+ project_name: str,
857
+ specifier: Optional[specifiers.BaseSpecifier] = None,
858
+ hashes: Optional[Hashes] = None,
859
+ ) -> BestCandidateResult:
860
+ """Find matches for the given project and specifier.
861
+
862
+ :param specifier: An optional object implementing `filter`
863
+ (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
864
+ versions.
865
+
866
+ :return: A `BestCandidateResult` instance.
867
+ """
868
+ candidates = self.find_all_candidates(project_name)
869
+ candidate_evaluator = self.make_candidate_evaluator(
870
+ project_name=project_name,
871
+ specifier=specifier,
872
+ hashes=hashes,
873
+ )
874
+ return candidate_evaluator.compute_best_candidate(candidates)
875
+
876
    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        # Internet-provided hashes are not trusted for requirement matching.
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate

        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version

        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )

        # Nothing installed and nothing found remotely: hard failure.
        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.iter_all()),
            )

            raise DistributionNotFound(
                "No matching distribution found for {}".format(req)
            )

        # The installed version wins when no candidate beats it.
        best_installed = False
        if installed_version and (
            best_candidate is None or best_candidate.version <= installed_version
        ):
            best_installed = True

        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            else:
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            # Not upgrading and something satisfying is installed: no-op.
            return None

        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                "Installed version (%s) is most up-to-date (past versions: %s)",
                installed_version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            raise BestVersionAlreadyInstalled

        logger.debug(
            "Using version %s (newest of versions: %s)",
            best_candidate.version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        return best_candidate
961
+
962
+
963
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.
    :raises ValueError: if no separator matching *canonical_name* exists.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

        >>> fragment = 'foo__bar-1.0'
        >>> canonical_name = 'foo-bar'
        >>> _find_name_version_sep(fragment, canonical_name)
        8
    """
    # Name and version must be separated by exactly one dash; every dash is
    # a potential separator.  Accept the first dash whose prefix
    # canonicalizes to the expected project name.
    for index, char in enumerate(fragment):
        if char != "-":
            continue
        if canonicalize_name(fragment[:index]) == canonical_name:
            return index
    raise ValueError(f"{fragment} does not match {canonical_name}")
987
+
988
+
989
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    :return: The version substring, or None when the fragment does not
        match the package name or carries no version.
    """
    try:
        sep = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        return None
    version = fragment[sep + 1 :]
    # An empty trailing string means there was no version after the dash.
    return version or None
llmeval-env/lib/python3.10/site-packages/pip/_internal/index/sources.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import mimetypes
3
+ import os
4
+ import pathlib
5
+ from typing import Callable, Iterable, Optional, Tuple
6
+
7
+ from pip._internal.models.candidate import InstallationCandidate
8
+ from pip._internal.models.link import Link
9
+ from pip._internal.utils.urls import path_to_url, url_to_path
10
+ from pip._internal.vcs import is_url
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ FoundCandidates = Iterable[InstallationCandidate]
15
+ FoundLinks = Iterable[Link]
16
+ CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]]
17
+ PageValidator = Callable[[Link], bool]
18
+
19
+
20
class LinkSource:
    """Abstract interface for one place links can be collected from."""

    @property
    def link(self) -> Optional[Link]:
        """Returns the underlying link, if there's one."""
        raise NotImplementedError()

    def page_candidates(self) -> FoundCandidates:
        """Candidates found by parsing an archive listing HTML file."""
        raise NotImplementedError()

    def file_links(self) -> FoundLinks:
        """Links found by specifying archives directly."""
        raise NotImplementedError()
33
+
34
+
35
+ def _is_html_file(file_url: str) -> bool:
36
+ return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
37
+
38
+
39
class _FlatDirectorySource(LinkSource):
    """Link source specified by ``--find-links=<path-to-dir>``.

    The directory's immediate entries are scanned on each query:

    * ``page_candidates``: candidates parsed out of each HTML file found.
    * ``file_links``: every non-HTML entry, treated as an archive.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        path: str,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        # Resolve symlinks up front so entries get stable URLs.
        self._path = pathlib.Path(os.path.realpath(path))

    @property
    def link(self) -> Optional[Link]:
        # A flat directory is not addressable by a single link.
        return None

    def page_candidates(self) -> FoundCandidates:
        for entry in self._path.iterdir():
            entry_url = path_to_url(str(entry))
            if _is_html_file(entry_url):
                yield from self._candidates_from_page(Link(entry_url))

    def file_links(self) -> FoundLinks:
        for entry in self._path.iterdir():
            entry_url = path_to_url(str(entry))
            if not _is_html_file(entry_url):
                yield Link(entry_url)
73
+
74
+
75
class _LocalFileSource(LinkSource):
    """``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.

    If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
    the option, it is converted to a URL first. This returns:

    * ``page_candidates``: candidates parsed from the file when it is HTML.
    * ``file_links``: the file itself when it is not HTML.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        # Only an HTML file can be treated as an index page.
        if _is_html_file(self._link.url):
            yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        # A non-HTML file is treated as a direct archive.
        if not _is_html_file(self._link.url):
            yield self._link
106
+
107
+
108
class _RemoteFileSource(LinkSource):
    """``--find-links=<url>`` or ``--[extra-]index-url=<url>``.

    This returns:

    * ``page_candidates``: candidates parsed from the page, but only when
      the link passes the configured page validator.
    * ``file_links``: the link itself, treated as a direct archive.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        page_validator: PageValidator,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._page_validator = page_validator
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        if self._page_validator(self._link):
            yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        yield self._link
138
+
139
+
140
class _IndexDirectorySource(LinkSource):
    """``--[extra-]index-url=<path-to-directory>``.

    This is treated like a remote URL; ``candidates_from_page`` contains logic
    for this by appending ``index.html`` to the link.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        # An index directory never contributes direct archive links.
        return ()
164
+
165
+
166
def build_source(
    location: str,
    *,
    candidates_from_page: CandidatesFromPage,
    page_validator: PageValidator,
    expand_dir: bool,
    cache_link_parsing: bool,
) -> Tuple[Optional[str], Optional[LinkSource]]:
    """Classify *location* and build the matching LinkSource.

    :param location: A local path, a ``file:`` URL, or a remote URL.
    :param expand_dir: When the location is a directory, scan its entries
        (_FlatDirectorySource) rather than treating it as an index
        (_IndexDirectorySource).
    :return: ``(url, source)``; either element is None when the location
        is ignored (a warning is logged in that case).
    """
    # Normalize the location into a URL and, when local, a filesystem path.
    path: Optional[str] = None
    url: Optional[str] = None
    if os.path.exists(location):  # Is a local path.
        url = path_to_url(location)
        path = location
    elif location.startswith("file:"):  # A file: URL.
        url = location
        path = url_to_path(location)
    elif is_url(location):
        url = location

    if url is None:
        msg = (
            "Location '%s' is ignored: "
            "it is either a non-existing path or lacks a specific scheme."
        )
        logger.warning(msg, location)
        return (None, None)

    if path is None:
        # No local path means a genuinely remote URL.
        source: LinkSource = _RemoteFileSource(
            candidates_from_page=candidates_from_page,
            page_validator=page_validator,
            link=Link(url, cache_link_parsing=cache_link_parsing),
        )
        return (url, source)

    if os.path.isdir(path):
        if expand_dir:
            source = _FlatDirectorySource(
                candidates_from_page=candidates_from_page,
                path=path,
            )
        else:
            source = _IndexDirectorySource(
                candidates_from_page=candidates_from_page,
                link=Link(url, cache_link_parsing=cache_link_parsing),
            )
        return (url, source)
    elif os.path.isfile(path):
        source = _LocalFileSource(
            candidates_from_page=candidates_from_page,
            link=Link(url, cache_link_parsing=cache_link_parsing),
        )
        return (url, source)
    # Local, but neither file nor directory (e.g. a broken symlink or
    # special file): keep the URL but provide no source.
    logger.warning(
        "Location '%s' is ignored: it is neither a file nor a directory.",
        location,
    )
    return (url, None)
llmeval-env/lib/python3.10/site-packages/pip/_internal/main.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+
4
def main(args: Optional[List[str]] = None) -> int:
    """This is preserved for old console scripts that may still be referencing
    it.

    For additional details, see https://github.com/pypa/pip/issues/7498.
    """
    # Imported lazily at call time — presumably to keep importing this
    # module itself side-effect free; confirm before moving to module scope.
    from pip._internal.utils.entrypoints import _wrapper

    return _wrapper(args)
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """Contains purely network-related utilities.
2
+ """
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (249 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/auth.cpython-310.pyc ADDED
Binary file (7.52 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/cache.cpython-310.pyc ADDED
Binary file (2.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/download.cpython-310.pyc ADDED
Binary file (5.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc ADDED
Binary file (8.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/session.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-310.pyc ADDED
Binary file (2.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/auth.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Network Authentication Helpers
2
+
3
+ Contains interface (MultiDomainBasicAuth) and associated glue code for
4
+ providing credentials in the context of network requests.
5
+ """
6
+
7
+ import urllib.parse
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
11
+ from pip._vendor.requests.models import Request, Response
12
+ from pip._vendor.requests.utils import get_netrc_auth
13
+
14
+ from pip._internal.utils.logging import getLogger
15
+ from pip._internal.utils.misc import (
16
+ ask,
17
+ ask_input,
18
+ ask_password,
19
+ remove_auth_from_url,
20
+ split_auth_netloc_from_url,
21
+ )
22
+ from pip._internal.vcs.versioncontrol import AuthInfo
23
+
24
+ logger = getLogger(__name__)
25
+
26
+ Credentials = Tuple[str, str, str]
27
+
28
+ try:
29
+ import keyring
30
+ except ImportError:
31
+ keyring = None # type: ignore[assignment]
32
+ except Exception as exc:
33
+ logger.warning(
34
+ "Keyring is skipped due to an exception: %s",
35
+ str(exc),
36
+ )
37
+ keyring = None # type: ignore[assignment]
38
+
39
+
40
def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[AuthInfo]:
    """Return the tuple auth for a given url from keyring."""
    # ``keyring`` is disabled module-wide (set to None) after any failure,
    # so later calls skip it entirely.
    global keyring
    if not url or not keyring:
        return None

    try:
        # Newer keyring versions expose get_credential; prefer it because it
        # can resolve a credential even without a username.
        try:
            get_credential = keyring.get_credential
        except AttributeError:
            pass
        else:
            logger.debug("Getting credentials from keyring for %s", url)
            cred = get_credential(url, username)
            if cred is not None:
                return cred.username, cred.password
            return None

        # Fallback path for older keyring: password lookup needs a username.
        if username:
            logger.debug("Getting password from keyring for %s", url)
            password = keyring.get_password(url, username)
            if password:
                return username, password

    except Exception as exc:
        # Any keyring failure permanently disables it for this process.
        logger.warning(
            "Keyring is skipped due to an exception: %s",
            str(exc),
        )
        keyring = None  # type: ignore[assignment]
    return None
71
+
72
+
73
+ class MultiDomainBasicAuth(AuthBase):
74
    def __init__(
        self, prompting: bool = True, index_urls: Optional[List[str]] = None
    ) -> None:
        """
        :param prompting: whether missing credentials may be requested
            interactively.
        :param index_urls: configured index URLs; their embedded credentials
            may be reused for requests that match them.
        """
        self.prompting = prompting
        self.index_urls = index_urls
        # Credential cache keyed by netloc (see _get_url_and_credentials).
        self.passwords: Dict[str, AuthInfo] = {}
        # When the user is prompted to enter credentials and keyring is
        # available, we will offer to save them. If the user accepts,
        # this value is set to the credentials they entered. After the
        # request authenticates, the caller should call
        # ``save_credentials`` to save these.
        self._credentials_to_save: Optional[Credentials] = None
86
+
87
+ def _get_index_url(self, url: str) -> Optional[str]:
88
+ """Return the original index URL matching the requested URL.
89
+
90
+ Cached or dynamically generated credentials may work against
91
+ the original index URL rather than just the netloc.
92
+
93
+ The provided url should have had its username and password
94
+ removed already. If the original index url had credentials then
95
+ they will be included in the return value.
96
+
97
+ Returns None if no matching index was found, or if --no-index
98
+ was specified by the user.
99
+ """
100
+ if not url or not self.index_urls:
101
+ return None
102
+
103
+ for u in self.index_urls:
104
+ prefix = remove_auth_from_url(u).rstrip("/") + "/"
105
+ if url.startswith(prefix):
106
+ return u
107
+ return None
108
+
109
    def _get_new_credentials(
        self,
        original_url: str,
        allow_netrc: bool = True,
        allow_keyring: bool = False,
    ) -> AuthInfo:
        """Find and return credentials for the specified URL.

        Lookup order: URL-embedded -> matching index URL -> netrc (if
        allowed) -> keyring (if allowed).  May return a partial
        (username, None) or (None, None) pair when nothing complete is found.
        """
        # Split the credentials and netloc from the url.
        url, netloc, url_user_password = split_auth_netloc_from_url(
            original_url,
        )

        # Start with the credentials embedded in the url
        username, password = url_user_password
        if username is not None and password is not None:
            logger.debug("Found credentials in url for %s", netloc)
            return url_user_password

        # Find a matching index url for this request
        index_url = self._get_index_url(url)
        if index_url:
            # Split the credentials from the url.
            index_info = split_auth_netloc_from_url(index_url)
            if index_info:
                index_url, _, index_url_user_password = index_info
                logger.debug("Found index url %s", index_url)

        # If an index URL was found, try its embedded credentials
        if index_url and index_url_user_password[0] is not None:
            username, password = index_url_user_password
            if username is not None and password is not None:
                logger.debug("Found credentials in index url for %s", netloc)
                return index_url_user_password

        # Get creds from netrc if we still don't have them
        if allow_netrc:
            netrc_auth = get_netrc_auth(original_url)
            if netrc_auth:
                logger.debug("Found credentials in netrc for %s", netloc)
                return netrc_auth

        # If we don't have a password and keyring is available, use it.
        if allow_keyring:
            # The index url is more specific than the netloc, so try it first
            # fmt: off
            kr_auth = (
                get_keyring_auth(index_url, username) or
                get_keyring_auth(netloc, username)
            )
            # fmt: on
            if kr_auth:
                logger.debug("Found credentials in keyring for %s", netloc)
                return kr_auth

        # Possibly-partial result; caller decides how to handle gaps.
        return username, password
164
+
165
    def _get_url_and_credentials(
        self, original_url: str
    ) -> Tuple[str, Optional[str], Optional[str]]:
        """Return the credentials to use for the provided URL.

        If allowed, netrc and keyring may be used to obtain the
        correct credentials.

        Returns (url_without_credentials, username, password). Note
        that even if the original URL contains credentials, this
        function may return a different username and password.
        """
        url, netloc, _ = split_auth_netloc_from_url(original_url)

        # Try to get credentials from original url
        username, password = self._get_new_credentials(original_url)

        # If credentials not found, use any stored credentials for this netloc.
        # Do this if either the username or the password is missing.
        # This accounts for the situation in which the user has specified
        # the username in the index url, but the password comes from keyring.
        if (username is None or password is None) and netloc in self.passwords:
            un, pw = self.passwords[netloc]
            # It is possible that the cached credentials are for a different username,
            # in which case the cache should be ignored.
            if username is None or username == un:
                username, password = un, pw

        if username is not None or password is not None:
            # Convert the username and password if they're None, so that
            # this netloc will show up as "cached" in the conditional above.
            # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
            # cache the value that is going to be used.
            username = username or ""
            password = password or ""

            # Store any acquired credentials.
            self.passwords[netloc] = (username, password)

        # Invariant: credentials are either fully present or fully absent.
        assert (
            # Credentials were found
            (username is not None and password is not None)
            # Credentials were not found
            or (username is None and password is None)
        ), f"Could not load credentials from url: {original_url}"

        return url, username, password
212
+
213
+ def __call__(self, req: Request) -> Request:
214
+ # Get credentials for this request
215
+ url, username, password = self._get_url_and_credentials(req.url)
216
+
217
+ # Set the url of the request to the url without any credentials
218
+ req.url = url
219
+
220
+ if username is not None and password is not None:
221
+ # Send the basic auth with this request
222
+ req = HTTPBasicAuth(username, password)(req)
223
+
224
+ # Attach a hook to handle 401 responses
225
+ req.register_hook("response", self.handle_401)
226
+
227
+ return req
228
+
229
+ # Factored out to allow for easy patching in tests
230
+ def _prompt_for_password(
231
+ self, netloc: str
232
+ ) -> Tuple[Optional[str], Optional[str], bool]:
233
+ username = ask_input(f"User for {netloc}: ")
234
+ if not username:
235
+ return None, None, False
236
+ auth = get_keyring_auth(netloc, username)
237
+ if auth and auth[0] is not None and auth[1] is not None:
238
+ return auth[0], auth[1], False
239
+ password = ask_password("Password: ")
240
+ return username, password, True
241
+
242
+ # Factored out to allow for easy patching in tests
243
+ def _should_save_password_to_keyring(self) -> bool:
244
+ if not keyring:
245
+ return False
246
+ return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
247
+
248
    def handle_401(self, resp: Response, **kwargs: Any) -> Response:
        """Response hook: on a 401, obtain credentials (keyring or user
        prompt) and replay the request with HTTP Basic auth.

        Returns the final response; the original 401 response is appended
        to the new response's history.
        """
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib.parse.urlparse(resp.url)

        # Query the keyring for credentials:
        # (netrc is skipped -- it was already consulted on the first try)
        username, password = self._get_new_credentials(
            resp.url,
            allow_netrc=False,
            allow_keyring=True,
        )

        # Prompt the user for a new username and password
        save = False
        if not username and not password:
            username, password, save = self._prompt_for_password(parsed.netloc)

        # Store the new username and password to use for future requests
        self._credentials_to_save = None
        if username is not None and password is not None:
            self.passwords[parsed.netloc] = (username, password)

            # Prompt to save the password to keyring
            if save and self._should_save_password_to_keyring():
                self._credentials_to_save = (parsed.netloc, username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        req.register_hook("response", self.warn_on_401)

        # On successful request, save the credentials that were used to
        # keyring. (Note that if the user responded "no" above, this member
        # is not set and nothing will be saved.)
        if self._credentials_to_save:
            req.register_hook("response", self.save_credentials)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp
301
+
302
+ def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
303
+ """Response callback to warn about incorrect credentials."""
304
+ if resp.status_code == 401:
305
+ logger.warning(
306
+ "401 Error, Credentials not correct for %s",
307
+ resp.request.url,
308
+ )
309
+
310
+ def save_credentials(self, resp: Response, **kwargs: Any) -> None:
311
+ """Response callback to save credentials on success."""
312
+ assert keyring is not None, "should never reach here without keyring"
313
+ if not keyring:
314
+ return
315
+
316
+ creds = self._credentials_to_save
317
+ self._credentials_to_save = None
318
+ if creds and resp.status_code < 400:
319
+ try:
320
+ logger.info("Saving credentials to keyring")
321
+ keyring.set_password(*creds)
322
+ except Exception:
323
+ logger.exception("Failed to save credentials")
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/cache.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """HTTP cache implementation.
2
+ """
3
+
4
+ import os
5
+ from contextlib import contextmanager
6
+ from typing import Iterator, Optional
7
+
8
+ from pip._vendor.cachecontrol.cache import BaseCache
9
+ from pip._vendor.cachecontrol.caches import FileCache
10
+ from pip._vendor.requests.models import Response
11
+
12
+ from pip._internal.utils.filesystem import adjacent_tmp_file, replace
13
+ from pip._internal.utils.misc import ensure_dir
14
+
15
+
16
def is_from_cache(response: Response) -> bool:
    """Return whether ``response`` was served from the local HTTP cache.

    CacheControl tags cached responses with a ``from_cache`` attribute;
    responses that lack it are treated as fresh downloads.
    """
    served_from_cache = getattr(response, "from_cache", False)
    return served_from_cache
18
+
19
+
20
@contextmanager
def suppressed_cache_errors() -> Iterator[None]:
    """If we can't access the cache then we can just skip caching and process
    requests as if caching wasn't enabled.
    """
    try:
        yield
    except OSError:
        # Cache unavailable (permissions, disk full, ...): carry on.
        return
29
+
30
+
31
class SafeFileCache(BaseCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.

    Every filesystem operation is wrapped in suppressed_cache_errors(), so
    an unusable cache directory degrades to "no cache" instead of failing.
    """

    def __init__(self, directory: str) -> None:
        assert directory is not None, "Cache directory must not be None."
        super().__init__()
        self.directory = directory

    def _get_cache_path(self, name: str) -> str:
        # Mirrors cachecontrol.caches.file_cache.FileCache._fn: shard the
        # encoded key over five single-character directory levels. Kept
        # here for backwards-compatibility and to avoid depending on a
        # non-public method.
        digest = FileCache.encode(name)
        shards = list(digest[:5])
        return os.path.join(self.directory, *shards, digest)

    def get(self, key: str) -> Optional[bytes]:
        """Return the cached payload for ``key``, or None when missing or
        unreadable."""
        with suppressed_cache_errors():
            with open(self._get_cache_path(key), "rb") as f:
                return f.read()

    def set(self, key: str, value: bytes, expires: Optional[int] = None) -> None:
        """Store ``value`` under ``key``; filesystem errors are ignored."""
        target = self._get_cache_path(key)
        with suppressed_cache_errors():
            ensure_dir(os.path.dirname(target))

            # Write to an adjacent temp file, then rename into place so a
            # concurrent reader never sees a partially written entry.
            with adjacent_tmp_file(target) as f:
                f.write(value)

            replace(f.name, target)

    def delete(self, key: str) -> None:
        """Drop the entry for ``key`` if present; errors are ignored."""
        with suppressed_cache_errors():
            os.remove(self._get_cache_path(key))
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/download.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Download files with progress indicators.
2
+ """
3
+ import cgi
4
+ import logging
5
+ import mimetypes
6
+ import os
7
+ from typing import Iterable, Optional, Tuple
8
+
9
+ from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
10
+
11
+ from pip._internal.cli.progress_bars import get_download_progress_renderer
12
+ from pip._internal.exceptions import NetworkConnectionError
13
+ from pip._internal.models.index import PyPI
14
+ from pip._internal.models.link import Link
15
+ from pip._internal.network.cache import is_from_cache
16
+ from pip._internal.network.session import PipSession
17
+ from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
18
+ from pip._internal.utils.misc import format_size, redact_auth_from_url, splitext
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
def _get_http_response_size(resp: Response) -> Optional[int]:
    """Best-effort parse of the Content-Length header.

    Returns None when the header is absent or not a valid integer.
    """
    header_value = resp.headers.get("content-length")
    try:
        # int(None) raises TypeError, covering the missing-header case.
        return int(header_value)
    except (ValueError, TypeError):
        return None
28
+
29
+
30
def _prepare_download(
    resp: Response,
    link: Link,
    progress_bar: str,
) -> Iterable[bytes]:
    """Log the download and return the response's chunk iterator, wrapped
    in a progress renderer when a progress bar is warranted."""
    total_length = _get_http_response_size(resp)

    # PyPI's file host gets the shorter display form of the URL.
    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)
    if total_length:
        logged_url = f"{logged_url} ({format_size(total_length)})"

    if is_from_cache(resp):
        logger.info("Using cached %s", logged_url)
    else:
        logger.info("Downloading %s", logged_url)

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)

    # Show a progress bar only when INFO logging is enabled, the body is a
    # real download (not served from cache), and the size is unknown or
    # large enough (> 40 KB) to be worth animating.
    show_progress = (
        logger.getEffectiveLevel() <= logging.INFO
        and not is_from_cache(resp)
        and (not total_length or total_length > (40 * 1000))
    )
    if not show_progress:
        return chunks

    renderer = get_download_progress_renderer(bar_type=progress_bar, size=total_length)
    return renderer(chunks)
70
+
71
+
72
def sanitize_content_filename(filename: str) -> str:
    """
    Sanitize the "filename" value from a Content-Disposition header.

    Strips any directory components so a malicious value cannot escape
    the download directory.
    """
    # os.path.basename is defined as os.path.split(...)[1].
    _, base = os.path.split(filename)
    return base
77
+
78
+
79
def parse_content_disposition(content_disposition: str, default_filename: str) -> str:
    """
    Parse the "filename" value from a Content-Disposition header, and
    return the default filename if the result is empty.
    """
    # The ``cgi`` module previously used here is deprecated (PEP 594) and
    # removed in Python 3.13; ``email.message`` provides equivalent
    # RFC 2045 parameter parsing from the standard library.
    import email.message

    m = email.message.Message()
    m["content-type"] = content_disposition
    filename = m.get_param("filename")
    if filename:
        # We need to sanitize the filename to prevent directory traversal
        # in case the filename contains ".." path parts.
        # get_param may return a (charset, language, value) triple for
        # RFC 2231-encoded parameters, hence the str() coercion.
        filename = sanitize_content_filename(str(filename))
    return filename or default_filename
91
+
92
+
93
def _get_http_response_filename(resp: Response, link: Link) -> str:
    """Get an ideal filename from the given HTTP response, falling back to
    the link filename if not provided.
    """
    filename = link.filename  # fallback
    # Prefer the server-provided Content-Disposition filename, if any.
    disposition = resp.headers.get("content-disposition")
    if disposition:
        filename = parse_content_disposition(disposition, filename)

    ext: Optional[str] = splitext(filename)[1]
    if not ext:
        # No extension yet: try to derive one from the Content-Type ...
        ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # ... or, after a redirect, from the final URL.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    return filename
112
+
113
+
114
def _http_get_download(session: PipSession, link: Link) -> Response:
    """Issue a streaming GET for ``link`` (URL fragment stripped) and
    raise on HTTP error statuses."""
    url_without_fragment = link.url.split("#", 1)[0]
    resp = session.get(url_without_fragment, headers=HEADERS, stream=True)
    raise_for_status(resp)
    return resp
119
+
120
+
121
class Downloader:
    """Download a single link into a directory using a shared session."""

    def __init__(
        self,
        session: PipSession,
        progress_bar: str,
    ) -> None:
        self._session = session
        self._progress_bar = progress_bar

    def __call__(self, link: Link, location: str) -> Tuple[str, str]:
        """Download the file given by link into location.

        Returns (filepath, content_type).
        """
        try:
            resp = _http_get_download(self._session, link)
        except NetworkConnectionError as e:
            assert e.response is not None
            logger.critical(
                "HTTP error %s while getting %s", e.response.status_code, link
            )
            raise

        filepath = os.path.join(location, _get_http_response_filename(resp, link))

        # Stream the body to disk chunk by chunk (with progress display).
        with open(filepath, "wb") as destination:
            for chunk in _prepare_download(resp, link, self._progress_bar):
                destination.write(chunk)

        return filepath, resp.headers.get("Content-Type", "")
150
+
151
+
152
class BatchDownloader:
    """Download several links into one directory, yielding results lazily."""

    def __init__(
        self,
        session: PipSession,
        progress_bar: str,
    ) -> None:
        self._session = session
        self._progress_bar = progress_bar

    def __call__(
        self, links: Iterable[Link], location: str
    ) -> Iterable[Tuple[Link, Tuple[str, str]]]:
        """Download the files given by links into location.

        Yields (link, (filepath, content_type)) per completed download.
        """
        for link in links:
            try:
                resp = _http_get_download(self._session, link)
            except NetworkConnectionError as e:
                assert e.response is not None
                logger.critical(
                    "HTTP error %s while getting %s",
                    e.response.status_code,
                    link,
                )
                raise

            filepath = os.path.join(
                location, _get_http_response_filename(resp, link)
            )
            # Stream the body to disk chunk by chunk (with progress display).
            with open(filepath, "wb") as destination:
                for chunk in _prepare_download(resp, link, self._progress_bar):
                    destination.write(chunk)
            yield link, (filepath, resp.headers.get("Content-Type", ""))
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Lazy ZIP over HTTP"""
2
+
3
+ __all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]
4
+
5
+ from bisect import bisect_left, bisect_right
6
+ from contextlib import contextmanager
7
+ from tempfile import NamedTemporaryFile
8
+ from typing import Any, Dict, Iterator, List, Optional, Tuple
9
+ from zipfile import BadZipfile, ZipFile
10
+
11
+ from pip._vendor.packaging.utils import canonicalize_name
12
+ from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
13
+
14
+ from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution
15
+ from pip._internal.network.session import PipSession
16
+ from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
17
+
18
+
19
+ class HTTPRangeRequestUnsupported(Exception):
20
+ pass
21
+
22
+
23
+ def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
24
+ """Return a distribution object from the given wheel URL.
25
+
26
+ This uses HTTP range requests to only fetch the potion of the wheel
27
+ containing metadata, just enough for the object to be constructed.
28
+ If such requests are not supported, HTTPRangeRequestUnsupported
29
+ is raised.
30
+ """
31
+ with LazyZipOverHTTP(url, session) as zf:
32
+ # For read-only ZIP files, ZipFile only needs methods read,
33
+ # seek, seekable and tell, not the whole IO protocol.
34
+ wheel = MemoryWheel(zf.name, zf) # type: ignore
35
+ # After context manager exit, wheel.name
36
+ # is an invalid file by intention.
37
+ return get_wheel_distribution(wheel, canonicalize_name(name))
38
+
39
+
40
+ class LazyZipOverHTTP:
41
+ """File-like object mapped to a ZIP file over HTTP.
42
+
43
+ This uses HTTP range requests to lazily fetch the file's content,
44
+ which is supposed to be fed to ZipFile. If such requests are not
45
+ supported by the server, raise HTTPRangeRequestUnsupported
46
+ during initialization.
47
+ """
48
+
49
+ def __init__(
50
+ self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE
51
+ ) -> None:
52
+ head = session.head(url, headers=HEADERS)
53
+ raise_for_status(head)
54
+ assert head.status_code == 200
55
+ self._session, self._url, self._chunk_size = session, url, chunk_size
56
+ self._length = int(head.headers["Content-Length"])
57
+ self._file = NamedTemporaryFile()
58
+ self.truncate(self._length)
59
+ self._left: List[int] = []
60
+ self._right: List[int] = []
61
+ if "bytes" not in head.headers.get("Accept-Ranges", "none"):
62
+ raise HTTPRangeRequestUnsupported("range request is not supported")
63
+ self._check_zip()
64
+
65
+ @property
66
+ def mode(self) -> str:
67
+ """Opening mode, which is always rb."""
68
+ return "rb"
69
+
70
+ @property
71
+ def name(self) -> str:
72
+ """Path to the underlying file."""
73
+ return self._file.name
74
+
75
+ def seekable(self) -> bool:
76
+ """Return whether random access is supported, which is True."""
77
+ return True
78
+
79
+ def close(self) -> None:
80
+ """Close the file."""
81
+ self._file.close()
82
+
83
+ @property
84
+ def closed(self) -> bool:
85
+ """Whether the file is closed."""
86
+ return self._file.closed
87
+
88
+ def read(self, size: int = -1) -> bytes:
89
+ """Read up to size bytes from the object and return them.
90
+
91
+ As a convenience, if size is unspecified or -1,
92
+ all bytes until EOF are returned. Fewer than
93
+ size bytes may be returned if EOF is reached.
94
+ """
95
+ download_size = max(size, self._chunk_size)
96
+ start, length = self.tell(), self._length
97
+ stop = length if size < 0 else min(start + download_size, length)
98
+ start = max(0, stop - download_size)
99
+ self._download(start, stop - 1)
100
+ return self._file.read(size)
101
+
102
+ def readable(self) -> bool:
103
+ """Return whether the file is readable, which is True."""
104
+ return True
105
+
106
+ def seek(self, offset: int, whence: int = 0) -> int:
107
+ """Change stream position and return the new absolute position.
108
+
109
+ Seek to offset relative position indicated by whence:
110
+ * 0: Start of stream (the default). pos should be >= 0;
111
+ * 1: Current position - pos may be negative;
112
+ * 2: End of stream - pos usually negative.
113
+ """
114
+ return self._file.seek(offset, whence)
115
+
116
+ def tell(self) -> int:
117
+ """Return the current position."""
118
+ return self._file.tell()
119
+
120
+ def truncate(self, size: Optional[int] = None) -> int:
121
+ """Resize the stream to the given size in bytes.
122
+
123
+ If size is unspecified resize to the current position.
124
+ The current stream position isn't changed.
125
+
126
+ Return the new file size.
127
+ """
128
+ return self._file.truncate(size)
129
+
130
+ def writable(self) -> bool:
131
+ """Return False."""
132
+ return False
133
+
134
+ def __enter__(self) -> "LazyZipOverHTTP":
135
+ self._file.__enter__()
136
+ return self
137
+
138
+ def __exit__(self, *exc: Any) -> Optional[bool]:
139
+ return self._file.__exit__(*exc)
140
+
141
+ @contextmanager
142
+ def _stay(self) -> Iterator[None]:
143
+ """Return a context manager keeping the position.
144
+
145
+ At the end of the block, seek back to original position.
146
+ """
147
+ pos = self.tell()
148
+ try:
149
+ yield
150
+ finally:
151
+ self.seek(pos)
152
+
153
+ def _check_zip(self) -> None:
154
+ """Check and download until the file is a valid ZIP."""
155
+ end = self._length - 1
156
+ for start in reversed(range(0, end, self._chunk_size)):
157
+ self._download(start, end)
158
+ with self._stay():
159
+ try:
160
+ # For read-only ZIP files, ZipFile only needs
161
+ # methods read, seek, seekable and tell.
162
+ ZipFile(self) # type: ignore
163
+ except BadZipfile:
164
+ pass
165
+ else:
166
+ break
167
+
168
+ def _stream_response(
169
+ self, start: int, end: int, base_headers: Dict[str, str] = HEADERS
170
+ ) -> Response:
171
+ """Return HTTP response to a range request from start to end."""
172
+ headers = base_headers.copy()
173
+ headers["Range"] = f"bytes={start}-{end}"
174
+ # TODO: Get range requests to be correctly cached
175
+ headers["Cache-Control"] = "no-cache"
176
+ return self._session.get(self._url, headers=headers, stream=True)
177
+
178
+ def _merge(
179
+ self, start: int, end: int, left: int, right: int
180
+ ) -> Iterator[Tuple[int, int]]:
181
+ """Return an iterator of intervals to be fetched.
182
+
183
+ Args:
184
+ start (int): Start of needed interval
185
+ end (int): End of needed interval
186
+ left (int): Index of first overlapping downloaded data
187
+ right (int): Index after last overlapping downloaded data
188
+ """
189
+ lslice, rslice = self._left[left:right], self._right[left:right]
190
+ i = start = min([start] + lslice[:1])
191
+ end = max([end] + rslice[-1:])
192
+ for j, k in zip(lslice, rslice):
193
+ if j > i:
194
+ yield i, j - 1
195
+ i = k + 1
196
+ if i <= end:
197
+ yield i, end
198
+ self._left[left:right], self._right[left:right] = [start], [end]
199
+
200
+ def _download(self, start: int, end: int) -> None:
201
+ """Download bytes from start to end inclusively."""
202
+ with self._stay():
203
+ left = bisect_left(self._right, start)
204
+ right = bisect_right(self._left, end)
205
+ for start, end in self._merge(start, end, left, right):
206
+ response = self._stream_response(start, end)
207
+ response.raise_for_status()
208
+ self.seek(start)
209
+ for chunk in response_chunks(response, self._chunk_size):
210
+ self._file.write(chunk)
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/session.py ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """PipSession and supporting code, containing all pip-specific
2
+ network request configuration and behavior.
3
+ """
4
+
5
+ import email.utils
6
+ import io
7
+ import ipaddress
8
+ import json
9
+ import logging
10
+ import mimetypes
11
+ import os
12
+ import platform
13
+ import shutil
14
+ import subprocess
15
+ import sys
16
+ import urllib.parse
17
+ import warnings
18
+ from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
19
+
20
+ from pip._vendor import requests, urllib3
21
+ from pip._vendor.cachecontrol import CacheControlAdapter
22
+ from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
23
+ from pip._vendor.requests.models import PreparedRequest, Response
24
+ from pip._vendor.requests.structures import CaseInsensitiveDict
25
+ from pip._vendor.urllib3.connectionpool import ConnectionPool
26
+ from pip._vendor.urllib3.exceptions import InsecureRequestWarning
27
+
28
+ from pip import __version__
29
+ from pip._internal.metadata import get_default_environment
30
+ from pip._internal.models.link import Link
31
+ from pip._internal.network.auth import MultiDomainBasicAuth
32
+ from pip._internal.network.cache import SafeFileCache
33
+
34
+ # Import ssl from compat so the initial import occurs in only one place.
35
+ from pip._internal.utils.compat import has_tls
36
+ from pip._internal.utils.glibc import libc_ver
37
+ from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
38
+ from pip._internal.utils.urls import url_to_path
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
+ SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
43
+
44
+
45
+ # Ignore warning raised when using --trusted-host.
46
+ warnings.filterwarnings("ignore", category=InsecureRequestWarning)
47
+
48
+
49
+ SECURE_ORIGINS: List[SecureOrigin] = [
50
+ # protocol, hostname, port
51
+ # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
52
+ ("https", "*", "*"),
53
+ ("*", "localhost", "*"),
54
+ ("*", "127.0.0.0/8", "*"),
55
+ ("*", "::1/128", "*"),
56
+ ("file", "*", None),
57
+ # ssh is always secure.
58
+ ("ssh", "*", "*"),
59
+ ]
60
+
61
+
62
+ # These are environment variables present when running under various
63
+ # CI systems. For each variable, some CI systems that use the variable
64
+ # are indicated. The collection was chosen so that for each of a number
65
+ # of popular systems, at least one of the environment variables is used.
66
+ # This list is used to provide some indication of and lower bound for
67
+ # CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
68
+ # For more background, see: https://github.com/pypa/pip/issues/5499
69
+ CI_ENVIRONMENT_VARIABLES = (
70
+ # Azure Pipelines
71
+ "BUILD_BUILDID",
72
+ # Jenkins
73
+ "BUILD_ID",
74
+ # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
75
+ "CI",
76
+ # Explicit environment variable.
77
+ "PIP_IS_CI",
78
+ )
79
+
80
+
81
def looks_like_ci() -> bool:
    """
    Return whether it looks like pip is running under CI.
    """
    # We don't use the method of checking for a tty (e.g. using isatty())
    # because some CI systems mimic a tty (e.g. Travis CI). Thus that
    # method doesn't provide definitive information in either direction.
    for variable in CI_ENVIRONMENT_VARIABLES:
        if variable in os.environ:
            return True
    return False
89
+
90
+
91
def user_agent() -> str:
    """
    Return a string representing the user agent.

    The result is "pip/<version> <json>", where <json> is a compact,
    key-sorted JSON object describing the interpreter, platform/distro,
    OpenSSL, setuptools/rustc versions, and CI status.
    """
    data: Dict[str, Any] = {
        "installer": {"name": "pip", "version": __version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    # Implementation-specific version string.
    if data["implementation"]["name"] == "CPython":
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == "PyPy":
        pypy_version_info = sys.pypy_version_info  # type: ignore
        # Drop the releaselevel/serial parts for final releases.
        if pypy_version_info.releaselevel == "final":
            pypy_version_info = pypy_version_info[:3]
        data["implementation"]["version"] = ".".join(
            [str(x) for x in pypy_version_info]
        )
    elif data["implementation"]["name"] == "Jython":
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == "IronPython":
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        from pip._vendor import distro

        linux_distribution = distro.name(), distro.version(), distro.codename()
        # Keep only the non-empty parts of the distro triple.
        distro_infos: Dict[str, Any] = dict(
            filter(
                lambda x: x[1],
                zip(["name", "version", "id"], linux_distribution),
            )
        )
        libc = dict(
            filter(
                lambda x: x[1],
                zip(["lib", "version"], libc_ver()),
            )
        )
        if libc:
            distro_infos["libc"] = libc
        if distro_infos:
            data["distro"] = distro_infos

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()

    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()

    if platform.machine():
        data["cpu"] = platform.machine()

    if has_tls():
        import _ssl as ssl

        data["openssl_version"] = ssl.OPENSSL_VERSION

    setuptools_dist = get_default_environment().get_distribution("setuptools")
    if setuptools_dist is not None:
        data["setuptools_version"] = str(setuptools_dist.version)

    if shutil.which("rustc") is not None:
        # If for any reason `rustc --version` fails, silently ignore it
        try:
            rustc_output = subprocess.check_output(
                ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5
            )
        except Exception:
            pass
        else:
            if rustc_output.startswith(b"rustc "):
                # The format of `rustc --version` is:
                # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'`
                # We extract just the middle (1.52.1) part
                data["rustc_version"] = rustc_output.split(b" ")[1].decode()

    # Use None rather than False so as not to give the impression that
    # pip knows it is not being run under CI. Rather, it is a null or
    # inconclusive result. Also, we include some value rather than no
    # value to make it easier to know that the check has been run.
    data["ci"] = True if looks_like_ci() else None

    user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
    if user_data is not None:
        data["user_data"] = user_data

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
190
+
191
+
192
class LocalFSAdapter(BaseAdapter):
    """Requests transport adapter that serves file:// URLs from the local
    filesystem instead of the network."""

    def send(
        self,
        request: PreparedRequest,
        stream: bool = False,
        timeout: Optional[Union[float, Tuple[float, float]]] = None,
        verify: Union[bool, str] = True,
        cert: Optional[Union[str, Tuple[str, str]]] = None,
        proxies: Optional[Mapping[str, str]] = None,
    ) -> Response:
        """Build a synthetic Response whose body is the local file."""
        file_path = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            file_stats = os.stat(file_path)
        except OSError as exc:
            # Surface the failure as a 404 whose body carries the
            # formatted exception -- friendlier than a raw traceback.
            resp.status_code = 404
            resp.reason = type(exc).__name__
            resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8"))
        else:
            last_modified = email.utils.formatdate(file_stats.st_mtime, usegmt=True)
            guessed_type = mimetypes.guess_type(file_path)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict(
                {
                    "Content-Type": guessed_type,
                    "Content-Length": file_stats.st_size,
                    "Last-Modified": last_modified,
                }
            )

            resp.raw = open(file_path, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self) -> None:
        pass
234
+
235
+
236
class InsecureHTTPAdapter(HTTPAdapter):
    # HTTPAdapter variant that forces TLS certificate verification off,
    # regardless of the caller-supplied ``verify`` value. Per the session
    # setup comments below, it is used for http:// URLs and (when caching
    # is disabled) for hosts marked as trusted.
    def cert_verify(
        self,
        conn: ConnectionPool,
        url: str,
        verify: Union[bool, str],
        cert: Optional[Union[str, Tuple[str, str]]],
    ) -> None:
        # Drop ``verify`` on the floor and always pass verify=False.
        super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
245
+
246
+
247
class InsecureCacheControlAdapter(CacheControlAdapter):
    # Caching counterpart of InsecureHTTPAdapter: same forced-off TLS
    # verification, but layered on CacheControlAdapter so responses can
    # still be cached.
    def cert_verify(
        self,
        conn: ConnectionPool,
        url: str,
        verify: Union[bool, str],
        cert: Optional[Union[str, Tuple[str, str]]],
    ) -> None:
        # Drop ``verify`` on the floor and always pass verify=False.
        super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
256
+
257
+
258
class PipSession(requests.Session):
    """A ``requests.Session`` configured for pip: custom User-Agent,
    multi-index authentication, retries, optional response caching, and
    per-host relaxation of TLS verification for trusted hosts."""

    # Session-wide default timeout; ``request`` applies it whenever the
    # caller does not pass a timeout explicitly.
    timeout: Optional[int] = None

    def __init__(
        self,
        *args: Any,
        retries: int = 0,
        cache: Optional[str] = None,
        trusted_hosts: Sequence[str] = (),
        index_urls: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """
        :param retries: Total number of retries allowed per request.
        :param cache: Directory for the HTTP cache; caching is disabled
            when ``None``.
        :param trusted_hosts: Domains not to emit warnings for when not using
            HTTPS.
        :param index_urls: Index URLs passed to the authentication handler.
        """
        super().__init__(*args, **kwargs)

        # Namespace the attribute with "pip_" just in case to prevent
        # possible conflicts with the base class.
        self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth(index_urls=index_urls)

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.  NOTE: this deliberately rebinds the
        # ``retries`` parameter from an int to a Retry object.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,
            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            # A 500 may indicate transient error in Amazon S3
            # A 520 or 527 - may indicate transient error in CloudFlare
            status_forcelist=[500, 503, 520, 527],
            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )  # type: ignore

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching so we'll use it for all http:// URLs.
        # If caching is disabled, we will also use it for
        # https:// hosts that we've marked as ignoring
        # TLS errors for (trusted-hosts).
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        # We want to _only_ cache responses on securely fetched origins or when
        # the host is specified as trusted. We do this because
        # we can't validate the response of an insecurely/untrusted fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache),
                max_retries=retries,
            )
            self._trusted_host_adapter = InsecureCacheControlAdapter(
                cache=SafeFileCache(cache),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)
            self._trusted_host_adapter = insecure_adapter

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        for host in trusted_hosts:
            self.add_trusted_host(host, suppress_logging=True)

    def update_index_urls(self, new_index_urls: List[str]) -> None:
        """
        :param new_index_urls: New index urls to update the authentication
            handler with.
        """
        self.auth.index_urls = new_index_urls

    def add_trusted_host(
        self, host: str, source: Optional[str] = None, suppress_logging: bool = False
    ) -> None:
        """Register ``host`` as trusted and mount the insecure adapter on it.

        :param host: It is okay to provide a host that has previously been
            added.
        :param source: An optional source string, for logging where the host
            string came from.
        """
        if not suppress_logging:
            msg = f"adding trusted host: {host!r}"
            if source is not None:
                msg += f" (from {source})"
            logger.info(msg)

        host_port = parse_netloc(host)
        if host_port not in self.pip_trusted_origins:
            self.pip_trusted_origins.append(host_port)

        # Mount the trusted-host adapter for both schemes so that http://
        # URLs skip the "not secure" warning and https:// URLs skip TLS
        # verification for this host.
        self.mount(
            build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
        )
        self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
        if not host_port[1]:
            # No explicit port was given: also match URLs that specify any
            # port on this host (the trailing ":" acts as a prefix match).
            self.mount(
                build_url_from_netloc(host, scheme="http") + ":",
                self._trusted_host_adapter,
            )
            # Mount wildcard ports for the same host.
            self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)

    def iter_secure_origins(self) -> Iterator[SecureOrigin]:
        """Yield the built-in secure origins followed by one wildcard
        (scheme, host, port) entry per user-added trusted host."""
        yield from SECURE_ORIGINS
        for host, port in self.pip_trusted_origins:
            yield ("*", host, "*" if port is None else port)

    def is_secure_origin(self, location: Link) -> bool:
        """Return True if ``location`` matches a secure or trusted origin;
        otherwise log a warning and return False."""
        # Determine if this url used a secure transport mechanism
        parsed = urllib.parse.urlparse(str(location))
        origin_protocol, origin_host, origin_port = (
            parsed.scheme,
            parsed.hostname,
            parsed.port,
        )

        # The protocol to use to see if the protocol matches.
        # Don't count the repository type as part of the protocol: in
        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
        # the last scheme.)
        origin_protocol = origin_protocol.rsplit("+", 1)[-1]

        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in self.iter_secure_origins():
            secure_protocol, secure_host, secure_port = secure_origin
            if origin_protocol != secure_protocol and secure_protocol != "*":
                continue

            try:
                addr = ipaddress.ip_address(origin_host)
                network = ipaddress.ip_network(secure_host)
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if (
                    origin_host
                    and origin_host.lower() != secure_host.lower()
                    and secure_host != "*"
                ):
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue

            # Check to see if the port matches.
            if (
                origin_port != secure_port
                and secure_port != "*"
                and secure_port is not None
            ):
                continue

            # If we've gotten here, then this origin matches the current
            # secure origin and we should return True
            return True

        # If we've gotten to this point, then the origin isn't secure and we
        # will not accept it as a valid location to search. We will however
        # log a warning that we are ignoring it.
        logger.warning(
            "The repository located at %s is not a trusted or secure host and "
            "is being ignored. If this repository is available via HTTPS we "
            "recommend you use HTTPS instead, otherwise you may silence "
            "this warning and allow it anyway with '--trusted-host %s'.",
            origin_host,
            origin_host,
        )

        return False

    def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
        """Issue a request, filling in the session-wide default timeout."""
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super().request(method, url, *args, **kwargs)
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/utils.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Iterator
2
+
3
+ from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
4
+
5
+ from pip._internal.exceptions import NetworkConnectionError
6
+
7
# The following comments and HTTP headers were originally added by
# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
#   and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
#   will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
#   that's already been compressed and compress it again, and set
#   the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're hoping
# to eliminate the third case. Hopefully there does not exist a server
# which when given a file will notice it is already compressed and that
# you're not asking for a compressed file and will then decompress it
# before sending because if that's the case I don't think it'll ever be
# possible to make this work.
HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}
27
+
28
+
29
def raise_for_status(resp: Response) -> None:
    """Raise ``NetworkConnectionError`` if *resp* has a 4xx/5xx status.

    Responses outside the error ranges pass through silently.
    """
    if isinstance(resp.reason, bytes):
        # We attempt to decode utf-8 first because some servers
        # choose to localize their reason strings. If the string
        # isn't utf-8, we fall back to iso-8859-1 for all other
        # encodings.
        try:
            reason = resp.reason.decode("utf-8")
        except UnicodeDecodeError:
            reason = resp.reason.decode("iso-8859-1")
    else:
        reason = resp.reason

    # Classify the status code; anything outside 400-599 is not an error.
    kind = None
    if 400 <= resp.status_code < 500:
        kind = "Client"
    elif 500 <= resp.status_code < 600:
        kind = "Server"

    if kind is not None:
        raise NetworkConnectionError(
            f"{resp.status_code} {kind} Error: {reason} for url: {resp.url}",
            response=resp,
        )
55
+
56
+
57
def response_chunks(
    response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
) -> Iterator[bytes]:
    """Given a requests Response, provide the data chunks."""
    try:
        # Special case for urllib3: its ``stream`` helper chunks the body
        # for us.  We pass decode_content=False so urllib3 hands back the
        # raw on-the-wire bytes: if it decompressed a body that a server
        # had (re)compressed and tagged with Content-Encoding: gzip, the
        # downloaded file's checksum could no longer be verified, since
        # checksums are computed over the file as stored.
        yield from response.raw.stream(
            chunk_size,
            decode_content=False,
        )
    except AttributeError:
        # Standard file-like object without ``stream``: fall back to
        # reading fixed-size blocks until EOF.
        while True:
            block = response.raw.read(chunk_size)
            if not block:
                break
            yield block
llmeval-env/lib/python3.10/site-packages/pip/_internal/network/xmlrpc.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """xmlrpclib.Transport implementation
2
+ """
3
+
4
+ import logging
5
+ import urllib.parse
6
+ import xmlrpc.client
7
+ from typing import TYPE_CHECKING, Tuple
8
+
9
+ from pip._internal.exceptions import NetworkConnectionError
10
+ from pip._internal.network.session import PipSession
11
+ from pip._internal.network.utils import raise_for_status
12
+
13
+ if TYPE_CHECKING:
14
+ from xmlrpc.client import _HostType, _Marshallable
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
class PipXmlrpcTransport(xmlrpc.client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object, so XML-RPC traffic shares pip's session configuration.
    """

    def __init__(
        self, index_url: str, session: PipSession, use_datetime: bool = False
    ) -> None:
        super().__init__(use_datetime)
        # Only the scheme of the index URL is kept; host and path are
        # supplied per request.
        self._scheme = urllib.parse.urlparse(index_url).scheme
        self._session = session

    def request(
        self,
        host: "_HostType",
        handler: str,
        request_body: bytes,
        verbose: bool = False,
    ) -> Tuple["_Marshallable", ...]:
        """POST *request_body* to ``scheme://host/handler`` and parse the
        XML-RPC response; log and re-raise on HTTP errors."""
        assert isinstance(host, str)
        url = urllib.parse.urlunparse(
            (self._scheme, host, handler, None, None, None)
        )
        try:
            response = self._session.post(
                url,
                data=request_body,
                headers={"Content-Type": "text/xml"},
                stream=True,
            )
            raise_for_status(response)
            self.verbose = verbose
            return self.parse_response(response.raw)
        except NetworkConnectionError as exc:
            assert exc.response
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code,
                url,
            )
            raise
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/check.cpython-310.pyc ADDED
Binary file (4.01 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-310.pyc ADDED
Binary file (6.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-310.pyc ADDED
Binary file (14.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (1.43 kB). View file