applied-ai-018 committed on
Commit
8be7cea
·
verified ·
1 Parent(s): 51b4b82

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/examples/__init__.py +0 -0
  2. llmeval-env/lib/python3.10/site-packages/examples/basic_example.py +60 -0
  3. llmeval-env/lib/python3.10/site-packages/requests/__init__.py +180 -0
  4. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/api.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/requests/cookies.py +561 -0
  15. llmeval-env/lib/python3.10/site-packages/requests/hooks.py +33 -0
  16. llmeval-env/lib/python3.10/site-packages/requests/models.py +1034 -0
  17. llmeval-env/lib/python3.10/site-packages/requests/sessions.py +833 -0
  18. llmeval-env/lib/python3.10/site-packages/requests/utils.py +1094 -0
  19. llmeval-env/lib/python3.10/site-packages/rouge_score/create_pyrouge_files.py +83 -0
  20. llmeval-env/lib/python3.10/site-packages/rouge_score/rouge.py +89 -0
  21. llmeval-env/lib/python3.10/site-packages/rouge_score/rouge_scorer_test.py +314 -0
  22. llmeval-env/lib/python3.10/site-packages/rouge_score/scoring.py +167 -0
  23. llmeval-env/lib/python3.10/site-packages/rouge_score/scoring_test.py +182 -0
  24. llmeval-env/lib/python3.10/site-packages/rouge_score/test_util.py +40 -0
  25. llmeval-env/lib/python3.10/site-packages/rouge_score/tokenize.py +62 -0
  26. llmeval-env/lib/python3.10/site-packages/rouge_score/tokenize_test.py +35 -0
  27. llmeval-env/lib/python3.10/site-packages/rouge_score/tokenizers.py +50 -0
  28. llmeval-env/lib/python3.10/site-packages/urllib3/__init__.py +211 -0
  29. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/__init__.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_base_connection.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_request_methods.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_version.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/connectionpool.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/exceptions.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/fields.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/response.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/urllib3/_collections.py +483 -0
  38. llmeval-env/lib/python3.10/site-packages/urllib3/_request_methods.py +279 -0
  39. llmeval-env/lib/python3.10/site-packages/urllib3/_version.py +4 -0
  40. llmeval-env/lib/python3.10/site-packages/urllib3/connection.py +930 -0
  41. llmeval-env/lib/python3.10/site-packages/urllib3/connectionpool.py +1186 -0
  42. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/socks.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/fetch.py +418 -0
llmeval-env/lib/python3.10/site-packages/examples/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/examples/basic_example.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from time import sleep
2
+ import multiprocessing
3
+
4
+ import logging
5
+ from tqdm_multiprocess.logger import setup_logger_tqdm
6
+ logger = logging.getLogger(__name__)
7
+
8
+ from tqdm_multiprocess import TqdmMultiProcessPool
9
+
10
def some_other_function(tqdm_func):
    """Drive three nested tqdm progress bars (outer / middle / inner).

    :param tqdm_func: tqdm-compatible bar factory injected by
        TqdmMultiProcessPool so bars render correctly from worker processes.
    """
    iterations1 = 100
    iterations2 = 5
    iterations3 = 2

    total_iterations = iterations1 * iterations2 * iterations3
    with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress3:
        progress3.set_description("outer")
        # Fix: the original reused loop variable `j` for both the middle and
        # inner loops (shadowing); distinct names avoid the confusion.
        for _outer in range(iterations3):
            logger.info("outer")
            total_iterations = iterations1 * iterations2
            with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress2:
                progress2.set_description("middle")
                for _middle in range(iterations2):
                    logger.info("middle")
                    with tqdm_func(total=iterations1, dynamic_ncols=True) as progress1:
                        # Fix: description typo "innert" -> "inner"; also set it
                        # once instead of on every inner iteration (invariant).
                        progress1.set_description("inner")
                        for _inner in range(iterations1):
                            # logger.info here would spam and slow tqdm too much
                            sleep(0.01)
                            # All three bars advance once per innermost step,
                            # matching the totals computed above.
                            progress1.update()
                            progress2.update()
                            progress3.update()

    logger.warning(f"Warning test message. {multiprocessing.current_process().name}")
    logger.error(f"Error test message. {multiprocessing.current_process().name}")
37
+
38
+
39
+ # Multiprocessed
40
def example_multiprocessing_function(some_input, tqdm_func):
    """Worker task: emit a few log records, run the nested-bar demo, return True."""
    process = multiprocessing.current_process()
    logger.debug(f"Debug test message - I won't show up in console. {process.name}")
    logger.info(f"Info test message. {process.name}")
    some_other_function(tqdm_func)
    return True
45
+
46
def error_callback():
    """Invoked by the pool when a task raises; just announce it."""
    print("Error!")
48
+
49
def example():
    """Fan ten demo tasks out over a four-process TqdmMultiProcessPool and print the results."""
    pool = TqdmMultiProcessPool()
    process_count = 4
    task_count = 10
    # Each task is (callable, args-tuple), as the pool expects.
    initial_tasks = [
        (example_multiprocessing_function, (task_id,)) for task_id in range(task_count)
    ]
    results = pool.map(process_count, initial_tasks, error_callback)
    print(results)
56
+
57
if __name__ == "__main__":
    # Route log records through tqdm.write so they don't clobber the bars.
    logfile_path = "tqdm_multiprocessing_example.log"
    setup_logger_tqdm(logfile_path)
    example()
llmeval-env/lib/python3.10/site-packages/requests/__init__.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # __
2
+ # /__) _ _ _ _ _/ _
3
+ # / ( (- (/ (/ (- _) / _)
4
+ # /
5
+
6
+ """
7
+ Requests HTTP Library
8
+ ~~~~~~~~~~~~~~~~~~~~~
9
+
10
+ Requests is an HTTP library, written in Python, for human beings.
11
+ Basic GET usage:
12
+
13
+ >>> import requests
14
+ >>> r = requests.get('https://www.python.org')
15
+ >>> r.status_code
16
+ 200
17
+ >>> b'Python is a programming language' in r.content
18
+ True
19
+
20
+ ... or POST:
21
+
22
+ >>> payload = dict(key1='value1', key2='value2')
23
+ >>> r = requests.post('https://httpbin.org/post', data=payload)
24
+ >>> print(r.text)
25
+ {
26
+ ...
27
+ "form": {
28
+ "key1": "value1",
29
+ "key2": "value2"
30
+ },
31
+ ...
32
+ }
33
+
34
+ The other HTTP methods are supported - see `requests.api`. Full documentation
35
+ is at <https://requests.readthedocs.io>.
36
+
37
+ :copyright: (c) 2017 by Kenneth Reitz.
38
+ :license: Apache 2.0, see LICENSE for more details.
39
+ """
40
+
41
+ import warnings
42
+
43
+ import urllib3
44
+
45
+ from .exceptions import RequestsDependencyWarning
46
+
47
+ try:
48
+ from charset_normalizer import __version__ as charset_normalizer_version
49
+ except ImportError:
50
+ charset_normalizer_version = None
51
+
52
+ try:
53
+ from chardet import __version__ as chardet_version
54
+ except ImportError:
55
+ chardet_version = None
56
+
57
+
58
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
    """Assert that installed urllib3 and chardet/charset_normalizer versions
    fall inside the ranges Requests supports.

    Raises AssertionError or ValueError for an unsupported combination; the
    module-level caller converts either into a RequestsDependencyWarning.
    """
    parts = urllib3_version.split(".")
    assert parts != ["dev"]  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1.
    if len(parts) == 2:
        parts.append("0")

    # Check urllib3 for compatibility: urllib3 >= 1.21.1.
    major, minor, patch = (int(piece) for piece in parts)
    assert major >= 1
    if major == 1:
        assert minor >= 21

    # Check the character-detection backend for compatibility.
    if chardet_version:
        # chardet >= 3.0.2, < 6.0.0
        major, minor, patch = (int(piece) for piece in chardet_version.split(".")[:3])
        assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0)
    elif charset_normalizer_version:
        # charset_normalizer >= 2.0.0, < 4.0.0
        major, minor, patch = (
            int(piece) for piece in charset_normalizer_version.split(".")[:3]
        )
        assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0)
    else:
        raise Exception("You need either charset_normalizer or chardet installed")
87
+
88
+
89
+ def _check_cryptography(cryptography_version):
90
+ # cryptography < 1.3.4
91
+ try:
92
+ cryptography_version = list(map(int, cryptography_version.split(".")))
93
+ except ValueError:
94
+ return
95
+
96
+ if cryptography_version < [1, 3, 4]:
97
+ warning = "Old version of cryptography ({}) may cause slowdown.".format(
98
+ cryptography_version
99
+ )
100
+ warnings.warn(warning, RequestsDependencyWarning)
101
+
102
+
103
# Verify the imported dependencies' versions; on mismatch, warn rather than fail.
try:
    check_compatibility(
        urllib3.__version__, chardet_version, charset_normalizer_version
    )
except (AssertionError, ValueError):
    warnings.warn(
        "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
        "version!".format(
            urllib3.__version__, chardet_version, charset_normalizer_version
        ),
        RequestsDependencyWarning,
    )

# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
    try:
        import ssl
    except ImportError:
        ssl = None

    if not getattr(ssl, "HAS_SNI", False):
        from urllib3.contrib import pyopenssl

        pyopenssl.inject_into_urllib3()

        # Check cryptography version
        from cryptography import __version__ as cryptography_version

        _check_cryptography(cryptography_version)
except ImportError:
    pass

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning

warnings.simplefilter("ignore", DependencyWarning)

# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

from . import packages, utils
from .__version__ import (
    __author__,
    __author_email__,
    __build__,
    __cake__,
    __copyright__,
    __description__,
    __license__,
    __title__,
    __url__,
    __version__,
)
from .api import delete, get, head, options, patch, post, put, request
from .exceptions import (
    ConnectionError,
    ConnectTimeout,
    FileModeWarning,
    HTTPError,
    JSONDecodeError,
    ReadTimeout,
    RequestException,
    Timeout,
    TooManyRedirects,
    URLRequired,
)
from .models import PreparedRequest, Request, Response
from .sessions import Session, session
from .status_codes import codes

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter("default", FileModeWarning, append=True)
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc ADDED
Binary file (539 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/api.cpython-310.pyc ADDED
Binary file (6.72 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc ADDED
Binary file (8.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (983 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc ADDED
Binary file (710 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc ADDED
Binary file (4.43 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc ADDED
Binary file (24.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/requests/cookies.py ADDED
@@ -0,0 +1,561 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.cookies
3
+ ~~~~~~~~~~~~~~~~
4
+
5
+ Compatibility code to be able to use `cookielib.CookieJar` with requests.
6
+
7
+ requests.utils imports from here, so be careful with imports.
8
+ """
9
+
10
+ import calendar
11
+ import copy
12
+ import time
13
+
14
+ from ._internal_utils import to_native_string
15
+ from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
16
+
17
+ try:
18
+ import threading
19
+ except ImportError:
20
+ import dummy_threading as threading
21
+
22
+
23
class MockRequest:
    """Adapt a `requests.Request` so it looks like a `urllib2.Request`.

    `cookielib.CookieJar` evaluates its cookie policy (whether a cookie may be
    set, given the request's and the cookie's domains) against this interface.

    The wrapped request is treated as read-only; headers the jar wants to add
    are accumulated separately and exposed via `get_new_headers()` — see
    `get_cookie_header`, defined below, for the usual entry point.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        # With no explicit Host header, the request URL is authoritative.
        host_header = self._r.headers.get("Host")
        if not host_header:
            return self._r.url
        # A Host header overrides the URL's netloc; rebuild the URL around it.
        host = to_native_string(host_header, encoding="utf-8")
        parsed = urlparse(self._r.url)
        return urlunparse(
            [
                parsed.scheme,
                host,
                parsed.path,
                parsed.params,
                parsed.query,
                parsed.fragment,
            ]
        )

    def is_unverifiable(self):
        return True

    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError(
            "Cookie headers should be added with add_unredirected_header()"
        )

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()

    @property
    def origin_req_host(self):
        return self.get_origin_req_host()

    @property
    def host(self):
        return self.get_host()
101
+
102
+
103
class MockResponse:
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        return self._headers

    def getheaders(self, name):
        # Fix: the header list was previously computed and silently dropped
        # (missing `return`), so callers always got None.
        return self._headers.getheaders(name)
122
+
123
+
124
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # _original_response is the wrapped httplib.HTTPResponse; without it
    # there are no raw headers to pull cookies from.
    original_response = getattr(response, "_original_response", None)
    if not original_response:
        return
    mock_request = MockRequest(request)
    # Hand cookielib the HTTPMessage carrying the response headers.
    mock_response = MockResponse(original_response.msg)
    jar.extract_cookies(mock_response, mock_request)
138
+
139
+
140
def get_cookie_header(jar, request):
    """
    Produce an appropriate Cookie header string to be sent with `request`, or None.

    :rtype: str
    """
    mock_request = MockRequest(request)
    # The jar writes the Cookie header into the mock's new-headers dict.
    jar.add_cookie_header(mock_request)
    return mock_request.get_new_headers().get("Cookie")
149
+
150
+
151
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect matches first: clearing while iterating would mutate the jar
    # mid-iteration.
    clearables = [
        (cookie.domain, cookie.path, cookie.name)
        for cookie in cookiejar
        if cookie.name == name
        and (domain is None or domain == cookie.domain)
        and (path is None or path == cookie.path)
    ]
    for cookie_domain, cookie_path, cookie_name in clearables:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
168
+
169
+
170
class CookieConflictError(RuntimeError):
    """Raised when more than one cookie in the jar matches the lookup criteria.

    Disambiguate by using .get()/.set() with explicit domain and path args.
    """
174
+
175
+
176
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
    """A ``cookielib.CookieJar`` that additionally exposes a dict interface.

    Requests creates one of these by default for requests and sessions that
    don't specify a jar, since some client code expects ``response.cookies``
    and ``session.cookies`` to support dict operations.

    Requests itself never relies on the dict interface — it exists purely for
    external client code. Externally supplied ``CookieJar`` instances (e.g.
    ``LWPCookieJar`` and ``FileCookieJar``) work with Requests out of the box.

    Unlike a regular CookieJar, this class is pickleable.

    .. warning:: dictionary operations that are normally O(1) may be O(n).
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also accepts optional domain and path args,
        to resolve naming collisions when one jar spans multiple domains.

        .. warning:: operation is O(n), not O(1).
        """
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also accepts optional domain and path args,
        to resolve naming collisions when one jar spans multiple domains.
        """
        # Assigning None unsets the cookie (dict-style "delete by assignment").
        if value is None:
            remove_cookie_by_name(
                self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
            )
            return

        if isinstance(value, Morsel):
            cookie = morsel_to_cookie(value)
        else:
            cookie = create_cookie(name, value, **kwargs)
        self.set_cookie(cookie)
        return cookie

    def iterkeys(self):
        """Yield the name of each cookie in the jar.

        .. seealso:: itervalues() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.name

    def keys(self):
        """Return a list of the names of all cookies in the jar.

        .. seealso:: values() and items().
        """
        return list(self.iterkeys())

    def itervalues(self):
        """Yield the value of each cookie in the jar.

        .. seealso:: iterkeys() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.value

    def values(self):
        """Return a list of the values of all cookies in the jar.

        .. seealso:: keys() and items().
        """
        return list(self.itervalues())

    def iteritems(self):
        """Yield (name, value) for each cookie in the jar.

        .. seealso:: iterkeys() and itervalues().
        """
        for cookie in iter(self):
            yield cookie.name, cookie.value

    def items(self):
        """Return a list of (name, value) tuples from the jar; lets client
        code call ``dict(RequestsCookieJar)`` for a vanilla python dict.

        .. seealso:: keys() and values().
        """
        return list(self.iteritems())

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        seen = []
        for cookie in iter(self):
            if cookie.domain not in seen:
                seen.append(cookie.domain)
        return seen

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        seen = []
        for cookie in iter(self):
            if cookie.path not in seen:
                seen.append(cookie.path)
        return seen

    def multiple_domains(self):
        """Whether the jar holds cookies for more than one domain.

        :rtype: bool
        """
        seen = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in seen:
                return True
            seen.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Return a plain dict of name-value pairs for the cookies matching
        the optional domain and path filters.

        :rtype: dict
        """
        return {
            cookie.name: cookie.value
            for cookie in iter(self)
            if (domain is None or cookie.domain == domain)
            and (path is None or cookie.path == path)
        }

    def __contains__(self, name):
        try:
            return super().__contains__(name)
        except CookieConflictError:
            # Several matching cookies still means "present".
            return True

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Raises
        if more than one cookie shares the name — use the more explicit get()
        in that case.

        .. warning:: operation is O(n), not O(1).
        """
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Raises
        if a cookie of that name already exists — use the more explicit set()
        in that case.
        """
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
        ``remove_cookie_by_name()``.
        """
        remove_cookie_by_name(self, name)

    def set_cookie(self, cookie, *args, **kwargs):
        value = cookie.value
        if (
            hasattr(value, "startswith")
            and value.startswith('"')
            and value.endswith('"')
        ):
            # Strip escaped quotes carried by server-quoted cookie values.
            cookie.value = value.replace('\\"', "")
        return super().set_cookie(cookie, *args, **kwargs)

    def update(self, other):
        """Updates this jar with cookies from another CookieJar or dict-like"""
        if isinstance(other, cookielib.CookieJar):
            for cookie in other:
                self.set_cookie(copy.copy(cookie))
        else:
            super().update(other)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values.

        If several cookies conflict, an arbitrary one wins; see
        _find_no_duplicates for the variant that raises on conflicts.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :return: cookie.value
        """
        for cookie in iter(self):
            if (
                cookie.name == name
                and (domain is None or cookie.domain == domain)
                and (path is None or cookie.path == path)
            ):
                return cookie.value

        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__get_item__`` and ``get`` call this function: it's never
        used elsewhere in Requests.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :raises KeyError: if cookie is not found
        :raises CookieConflictError: if there are multiple cookies
            that match name and optionally domain and path
        :return: cookie.value
        """
        found = None
        for cookie in iter(self):
            if (
                cookie.name == name
                and (domain is None or cookie.domain == domain)
                and (path is None or cookie.path == path)
            ):
                if found is not None:
                    # More than one cookie met the passed-in criteria.
                    raise CookieConflictError(
                        f"There are multiple cookies with name, {name!r}"
                    )
                # Candidate result, pending no further matches.
                found = cookie.value

        if found:
            return found
        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # The RLock can't be pickled; __setstate__ recreates it.
        state.pop("_cookies_lock")
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        if "_cookies_lock" not in self.__dict__:
            self._cookies_lock = threading.RLock()

    def copy(self):
        """Return a copy of this RequestsCookieJar."""
        duplicate = RequestsCookieJar()
        duplicate.set_policy(self.get_policy())
        duplicate.update(self)
        return duplicate

    def get_policy(self):
        """Return the CookiePolicy instance used."""
        return self._policy
438
+
439
+
440
+ def _copy_cookie_jar(jar):
441
+ if jar is None:
442
+ return None
443
+
444
+ if hasattr(jar, "copy"):
445
+ # We're dealing with an instance of RequestsCookieJar
446
+ return jar.copy()
447
+ # We're dealing with a generic CookieJar instance
448
+ new_jar = copy.copy(jar)
449
+ new_jar.clear()
450
+ for cookie in jar:
451
+ new_jar.set_cookie(copy.copy(cookie))
452
+ return new_jar
453
+
454
+
455
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    result = {
        "version": 0,
        "name": name,
        "value": value,
        "port": None,
        "domain": "",
        "path": "/",
        "secure": False,
        "expires": None,
        "discard": True,
        "comment": None,
        "comment_url": None,
        "rest": {"HttpOnly": None},
        "rfc2109": False,
    }

    unexpected = set(kwargs) - set(result)
    if unexpected:
        raise TypeError(
            f"create_cookie() got unexpected keyword arguments: {list(unexpected)}"
        )

    result.update(kwargs)
    # Derive the *_specified flags cookielib.Cookie expects from the values.
    result["port_specified"] = bool(result["port"])
    result["domain_specified"] = bool(result["domain"])
    result["domain_initial_dot"] = result["domain"].startswith(".")
    result["path_specified"] = bool(result["path"])

    return cookielib.Cookie(**result)
490
+
491
+
492
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair."""

    expires = None
    if morsel["max-age"]:
        # max-age is relative; convert it to an absolute epoch timestamp.
        try:
            expires = int(time.time() + int(morsel["max-age"]))
        except ValueError:
            raise TypeError(f"max-age: {morsel['max-age']} must be integer")
    elif morsel["expires"]:
        # expires is an absolute HTTP-date string.
        time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
        expires = calendar.timegm(time.strptime(morsel["expires"], time_template))
    return create_cookie(
        comment=morsel["comment"],
        comment_url=bool(morsel["comment"]),
        discard=False,
        domain=morsel["domain"],
        expires=expires,
        name=morsel.key,
        path=morsel["path"],
        port=None,
        rest={"HttpOnly": morsel["httponly"]},
        rfc2109=False,
        secure=bool(morsel["secure"]),
        value=morsel.value,
        version=morsel["version"] or 0,
    )
519
+
520
+
521
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) A cookiejar to add the cookies to.
    :param overwrite: (optional) If False, will not replace cookies
        already in the jar with new ones.
    :rtype: CookieJar
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()

    if cookie_dict is not None:
        # Snapshot existing names so overwrite=False can skip them.
        existing_names = [cookie.name for cookie in cookiejar]
        for name in cookie_dict:
            if overwrite or name not in existing_names:
                cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))

    return cookiejar
540
+
541
+
542
def merge_cookies(cookiejar, cookies):
    """Add cookies to cookiejar and returns a merged CookieJar.

    :param cookiejar: CookieJar object to add the cookies to.
    :param cookies: Dictionary or CookieJar object to be added.
    :rtype: CookieJar
    """
    if not isinstance(cookiejar, cookielib.CookieJar):
        raise ValueError("You can only merge into CookieJar")

    if isinstance(cookies, dict):
        # cookiejar_from_dict mutates and returns the same jar.
        return cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)

    if isinstance(cookies, cookielib.CookieJar):
        try:
            cookiejar.update(cookies)
        except AttributeError:
            # Plain CookieJars lack update(); add each cookie individually.
            for cookie_in_jar in cookies:
                cookiejar.set_cookie(cookie_in_jar)

    return cookiejar
llmeval-env/lib/python3.10/site-packages/requests/hooks.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.hooks
3
+ ~~~~~~~~~~~~~~
4
+
5
+ This module provides the capabilities for the Requests hooks system.
6
+
7
+ Available hooks:
8
+
9
+ ``response``:
10
+ The response generated from a Request.
11
+ """
12
HOOKS = ["response"]


def default_hooks():
    """Return a fresh event-to-handlers mapping with an empty list per hook."""
    registry = {}
    for event in HOOKS:
        registry[event] = []
    return registry
17
+
18
+
19
+ # TODO: response is the only one
20
+
21
+
22
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatches a hook dictionary on a given piece of data."""
    registered = (hooks or {}).get(key)
    if not registered:
        return hook_data

    # A single bare callable is treated as a one-element hook list.
    if callable(registered):
        registered = [registered]

    for hook in registered:
        result = hook(hook_data, **kwargs)
        # A hook may transform the data by returning a replacement;
        # returning None leaves the current data untouched.
        if result is not None:
            hook_data = result

    return hook_data
llmeval-env/lib/python3.10/site-packages/requests/models.py ADDED
@@ -0,0 +1,1034 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.models
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ This module contains the primary objects that power Requests.
6
+ """
7
+
8
+ import datetime
9
+
10
+ # Import encoding now, to avoid implicit import later.
11
+ # Implicit import within threads may cause LookupError when standard library is in a ZIP,
12
+ # such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
13
+ import encodings.idna # noqa: F401
14
+ from io import UnsupportedOperation
15
+
16
+ from urllib3.exceptions import (
17
+ DecodeError,
18
+ LocationParseError,
19
+ ProtocolError,
20
+ ReadTimeoutError,
21
+ SSLError,
22
+ )
23
+ from urllib3.fields import RequestField
24
+ from urllib3.filepost import encode_multipart_formdata
25
+ from urllib3.util import parse_url
26
+
27
+ from ._internal_utils import to_native_string, unicode_is_ascii
28
+ from .auth import HTTPBasicAuth
29
+ from .compat import (
30
+ Callable,
31
+ JSONDecodeError,
32
+ Mapping,
33
+ basestring,
34
+ builtin_str,
35
+ chardet,
36
+ cookielib,
37
+ )
38
+ from .compat import json as complexjson
39
+ from .compat import urlencode, urlsplit, urlunparse
40
+ from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
41
+ from .exceptions import (
42
+ ChunkedEncodingError,
43
+ ConnectionError,
44
+ ContentDecodingError,
45
+ HTTPError,
46
+ InvalidJSONError,
47
+ InvalidURL,
48
+ )
49
+ from .exceptions import JSONDecodeError as RequestsJSONDecodeError
50
+ from .exceptions import MissingSchema
51
+ from .exceptions import SSLError as RequestsSSLError
52
+ from .exceptions import StreamConsumedError
53
+ from .hooks import default_hooks
54
+ from .status_codes import codes
55
+ from .structures import CaseInsensitiveDict
56
+ from .utils import (
57
+ check_header_validity,
58
+ get_auth_from_url,
59
+ guess_filename,
60
+ guess_json_utf,
61
+ iter_slices,
62
+ parse_header_links,
63
+ requote_uri,
64
+ stream_decode_response_unicode,
65
+ super_len,
66
+ to_key_val_list,
67
+ )
68
+
69
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,  # 301
    codes.found,  # 302
    codes.other,  # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

#: Default cap on redirect following. Not consumed in this module —
#: presumably enforced by the Session redirect logic; confirm in sessions.py.
DEFAULT_REDIRECT_LIMIT = 30
#: Chunk size (bytes) used when draining a response body (see Response.content).
CONTENT_CHUNK_SIZE = 10 * 1024
#: Default per-read chunk size (bytes) for Response.iter_lines.
ITER_CHUNK_SIZE = 512
82
+
83
+
84
class RequestEncodingMixin:
    """Mixin supplying URL-path and body encoding helpers for request objects.

    Expects the host class to provide a ``url`` attribute (used by
    :attr:`path_url`).
    """

    @property
    def path_url(self):
        """Build the path URL to use."""

        url = []

        p = urlsplit(self.url)

        # An empty path still needs a leading "/" on the request line.
        path = p.path
        if not path:
            path = "/"

        url.append(path)

        query = p.query
        if query:
            url.append("?")
            url.append(query)

        return "".join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        # Strings/bytes and file-like objects pass through untouched; only
        # iterables of key/value pairs are form-encoded.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, "read"):
            return data
        elif hasattr(data, "__iter__"):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is wrapped so multi-valued keys and single
                # values share one code path.
                if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (
                                k.encode("utf-8") if isinstance(k, str) else k,
                                v.encode("utf-8") if isinstance(v, str) else v,
                            )
                        )
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).

        :returns: tuple of (body bytes, content-type header value).
        :raises ValueError: if ``files`` is empty or ``data`` is a string.
        """
        if not files:
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields go first, normalized to (str-key, bytes-value).
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, "__iter__"):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (
                            field.decode("utf-8")
                            if isinstance(field, bytes)
                            else field,
                            v.encode("utf-8") if isinstance(v, str) else v,
                        )
                    )

        for (k, v) in files:
            # support for explicit filename
            ft = None  # explicit content type, if given
            fh = None  # custom per-part headers, if given
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v

            # Normalize the file payload to raw data; a None payload means
            # the part is skipped entirely.
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            elif hasattr(fp, "read"):
                fdata = fp.read()
            elif fp is None:
                continue
            else:
                fdata = fp

            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
204
+
205
+
206
class RequestHooksMixin:
    """Mixin managing the per-event hook registry stored in ``self.hooks``."""

    def register_hook(self, event, hook):
        """Properly register a hook."""

        if event not in self.hooks:
            raise ValueError(f'Unsupported event specified, with event name "{event}"')

        bucket = self.hooks[event]
        if isinstance(hook, Callable):
            bucket.append(hook)
        elif hasattr(hook, "__iter__"):
            # An iterable of hooks: keep only the entries that are callable.
            bucket.extend(h for h in hook if isinstance(h, Callable))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """

        registered = self.hooks[event]
        if hook in registered:
            registered.remove(hook)
            return True
        return False
228
+
229
+
230
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: URL parameters to append to the URL. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(
        self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None,
    ):
        # Start from the standard hook table, then fold in caller-supplied hooks.
        self.hooks = default_hooks()
        for event, hook in list((hooks or {}).items()):
            self.register_hook(event=event, hook=hook)

        self.method = method
        self.url = url
        # Unset collection arguments become fresh empty containers so they
        # are never shared between Request instances.
        self.headers = {} if headers is None else headers
        self.files = [] if files is None else files
        self.data = [] if data is None else data
        self.json = json
        self.params = {} if params is None else params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return f"<Request [{self.method}]>"

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
312
+
313
+
314
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Instances are generated from a :class:`Request <Request>` object, and
    should not be instantiated manually; doing so may produce undesirable
    effects.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> r = req.prepare()
      >>> r
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
        #: integer denoting starting position of a readable file-like body.
        self._body_position = None

    def prepare(
        self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None,
    ):
        """Prepares the entire request with the given parameters.

        The per-field prepare_* helpers are intentionally ordered: cookies
        must be prepared before the body, and auth after the body so an
        authenticator sees the fully prepared request.
        """

        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)

        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)

    def __repr__(self):
        return f"<PreparedRequest [{self.method}]>"

    def copy(self):
        """Return a copy of this PreparedRequest.

        Headers and the cookie jar are copied; ``body`` and ``hooks`` are
        shared by reference with the original.
        """
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        p._body_position = self._body_position
        return p

    def prepare_method(self, method):
        """Prepares the given HTTP method."""
        self.method = method
        if self.method is not None:
            self.method = to_native_string(self.method.upper())

    @staticmethod
    def _get_idna_encoded_host(host):
        """IDNA-encode *host*, raising UnicodeError if encoding fails."""
        # Deferred import: idna is only needed for non-ASCII hostnames.
        import idna

        try:
            host = idna.encode(host, uts46=True).decode("utf-8")
        except idna.IDNAError:
            raise UnicodeError
        return host

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/psf/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode("utf8")
        else:
            url = str(url)

        # Remove leading whitespaces from url
        url = url.lstrip()

        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ":" in url and not url.lower().startswith("http"):
            self.url = url
            return

        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)

        if not scheme:
            raise MissingSchema(
                f"Invalid URL {url!r}: No scheme supplied. "
                f"Perhaps you meant https://{url}?"
            )

        if not host:
            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")

        # In general, we want to try IDNA encoding the hostname if the string contains
        # non-ASCII characters. This allows users to automatically get the correct IDNA
        # behaviour. For strings containing only ASCII characters, we need to also verify
        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
        if not unicode_is_ascii(host):
            try:
                host = self._get_idna_encoded_host(host)
            except UnicodeError:
                raise InvalidURL("URL has an invalid label.")
        elif host.startswith(("*", ".")):
            raise InvalidURL("URL has an invalid label.")

        # Carefully reconstruct the network location
        netloc = auth or ""
        if netloc:
            netloc += "@"
        netloc += host
        if port:
            netloc += f":{port}"

        # Bare domains aren't valid URLs.
        if not path:
            path = "/"

        if isinstance(params, (str, bytes)):
            params = to_native_string(params)

        # Merge caller-supplied params into any query string already in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = f"{query}&{enc_params}"
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers."""

        self.headers = CaseInsensitiveDict()
        if headers:
            for header in headers.items():
                # Raise exception on invalid header value.
                check_header_validity(header)
                name, value = header
                self.headers[to_native_string(name)] = value

    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data."""

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None

        # ``json`` is only used when no form ``data`` was supplied.
        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = "application/json"

            try:
                body = complexjson.dumps(json, allow_nan=False)
            except ValueError as ve:
                raise InvalidJSONError(ve, request=self)

            if not isinstance(body, bytes):
                body = body.encode("utf-8")

        # "Stream" means: iterable, but not a string/list/tuple/mapping.
        is_stream = all(
            [
                hasattr(data, "__iter__"),
                not isinstance(data, (basestring, list, tuple, Mapping)),
            ]
        )

        if is_stream:
            try:
                length = super_len(data)
            except (TypeError, AttributeError, UnsupportedOperation):
                length = None

            body = data

            if getattr(body, "tell", None) is not None:
                # Record the current file position before reading.
                # This will allow us to rewind a file in the event
                # of a redirect.
                try:
                    self._body_position = body.tell()
                except OSError:
                    # This differentiates from None, allowing us to catch
                    # a failed `tell()` later when trying to rewind the body
                    self._body_position = object()

            if files:
                raise NotImplementedError(
                    "Streamed bodies and files are mutually exclusive."
                )

            if length:
                self.headers["Content-Length"] = builtin_str(length)
            else:
                self.headers["Transfer-Encoding"] = "chunked"
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, "read"):
                        content_type = None
                    else:
                        content_type = "application/x-www-form-urlencoded"

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if content_type and ("content-type" not in self.headers):
                self.headers["Content-Type"] = content_type

        self.body = body

    def prepare_content_length(self, body):
        """Prepare Content-Length header based on request method and body"""
        if body is not None:
            length = super_len(body)
            if length:
                # If length exists, set it. Otherwise, we fallback
                # to Transfer-Encoding: chunked.
                self.headers["Content-Length"] = builtin_str(length)
        elif (
            self.method not in ("GET", "HEAD")
            and self.headers.get("Content-Length") is None
        ):
            # Set Content-Length to 0 for methods that can have a body
            # but don't provide one. (i.e. not GET or HEAD)
            self.headers["Content-Length"] = "0"

    def prepare_auth(self, auth, url=""):
        """Prepares the given HTTP auth data."""

        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand.
        """
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)

        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers["Cookie"] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
639
+
640
+
641
+ class Response:
642
+ """The :class:`Response <Response>` object, which contains a
643
+ server's response to an HTTP request.
644
+ """
645
+
646
+ __attrs__ = [
647
+ "_content",
648
+ "status_code",
649
+ "headers",
650
+ "url",
651
+ "history",
652
+ "encoding",
653
+ "reason",
654
+ "cookies",
655
+ "elapsed",
656
+ "request",
657
+ ]
658
+
659
+ def __init__(self):
660
+ self._content = False
661
+ self._content_consumed = False
662
+ self._next = None
663
+
664
+ #: Integer Code of responded HTTP Status, e.g. 404 or 200.
665
+ self.status_code = None
666
+
667
+ #: Case-insensitive Dictionary of Response Headers.
668
+ #: For example, ``headers['content-encoding']`` will return the
669
+ #: value of a ``'Content-Encoding'`` response header.
670
+ self.headers = CaseInsensitiveDict()
671
+
672
+ #: File-like object representation of response (for advanced usage).
673
+ #: Use of ``raw`` requires that ``stream=True`` be set on the request.
674
+ #: This requirement does not apply for use internally to Requests.
675
+ self.raw = None
676
+
677
+ #: Final URL location of Response.
678
+ self.url = None
679
+
680
+ #: Encoding to decode with when accessing r.text.
681
+ self.encoding = None
682
+
683
+ #: A list of :class:`Response <Response>` objects from
684
+ #: the history of the Request. Any redirect responses will end
685
+ #: up here. The list is sorted from the oldest to the most recent request.
686
+ self.history = []
687
+
688
+ #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
689
+ self.reason = None
690
+
691
+ #: A CookieJar of Cookies the server sent back.
692
+ self.cookies = cookiejar_from_dict({})
693
+
694
+ #: The amount of time elapsed between sending the request
695
+ #: and the arrival of the response (as a timedelta).
696
+ #: This property specifically measures the time taken between sending
697
+ #: the first byte of the request and finishing parsing the headers. It
698
+ #: is therefore unaffected by consuming the response content or the
699
+ #: value of the ``stream`` keyword argument.
700
+ self.elapsed = datetime.timedelta(0)
701
+
702
+ #: The :class:`PreparedRequest <PreparedRequest>` object to which this
703
+ #: is a response.
704
+ self.request = None
705
+
706
+ def __enter__(self):
707
+ return self
708
+
709
+ def __exit__(self, *args):
710
+ self.close()
711
+
712
+ def __getstate__(self):
713
+ # Consume everything; accessing the content attribute makes
714
+ # sure the content has been fully read.
715
+ if not self._content_consumed:
716
+ self.content
717
+
718
+ return {attr: getattr(self, attr, None) for attr in self.__attrs__}
719
+
720
+ def __setstate__(self, state):
721
+ for name, value in state.items():
722
+ setattr(self, name, value)
723
+
724
+ # pickled objects do not have .raw
725
+ setattr(self, "_content_consumed", True)
726
+ setattr(self, "raw", None)
727
+
728
+ def __repr__(self):
729
+ return f"<Response [{self.status_code}]>"
730
+
731
+ def __bool__(self):
732
+ """Returns True if :attr:`status_code` is less than 400.
733
+
734
+ This attribute checks if the status code of the response is between
735
+ 400 and 600 to see if there was a client error or a server error. If
736
+ the status code, is between 200 and 400, this will return True. This
737
+ is **not** a check to see if the response code is ``200 OK``.
738
+ """
739
+ return self.ok
740
+
741
+ def __nonzero__(self):
742
+ """Returns True if :attr:`status_code` is less than 400.
743
+
744
+ This attribute checks if the status code of the response is between
745
+ 400 and 600 to see if there was a client error or a server error. If
746
+ the status code, is between 200 and 400, this will return True. This
747
+ is **not** a check to see if the response code is ``200 OK``.
748
+ """
749
+ return self.ok
750
+
751
+ def __iter__(self):
752
+ """Allows you to use a response as an iterator."""
753
+ return self.iter_content(128)
754
+
755
+ @property
756
+ def ok(self):
757
+ """Returns True if :attr:`status_code` is less than 400, False if not.
758
+
759
+ This attribute checks if the status code of the response is between
760
+ 400 and 600 to see if there was a client error or a server error. If
761
+ the status code is between 200 and 400, this will return True. This
762
+ is **not** a check to see if the response code is ``200 OK``.
763
+ """
764
+ try:
765
+ self.raise_for_status()
766
+ except HTTPError:
767
+ return False
768
+ return True
769
+
770
+ @property
771
+ def is_redirect(self):
772
+ """True if this Response is a well-formed HTTP redirect that could have
773
+ been processed automatically (by :meth:`Session.resolve_redirects`).
774
+ """
775
+ return "location" in self.headers and self.status_code in REDIRECT_STATI
776
+
777
+ @property
778
+ def is_permanent_redirect(self):
779
+ """True if this Response one of the permanent versions of redirect."""
780
+ return "location" in self.headers and self.status_code in (
781
+ codes.moved_permanently,
782
+ codes.permanent_redirect,
783
+ )
784
+
785
+ @property
786
+ def next(self):
787
+ """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
788
+ return self._next
789
+
790
+ @property
791
+ def apparent_encoding(self):
792
+ """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
793
+ return chardet.detect(self.content)["encoding"]
794
+
795
+ def iter_content(self, chunk_size=1, decode_unicode=False):
796
+ """Iterates over the response data. When stream=True is set on the
797
+ request, this avoids reading the content at once into memory for
798
+ large responses. The chunk size is the number of bytes it should
799
+ read into memory. This is not necessarily the length of each item
800
+ returned as decoding can take place.
801
+
802
+ chunk_size must be of type int or None. A value of None will
803
+ function differently depending on the value of `stream`.
804
+ stream=True will read data as it arrives in whatever size the
805
+ chunks are received. If stream=False, data is returned as
806
+ a single chunk.
807
+
808
+ If decode_unicode is True, content will be decoded using the best
809
+ available encoding based on the response.
810
+ """
811
+
812
+ def generate():
813
+ # Special case for urllib3.
814
+ if hasattr(self.raw, "stream"):
815
+ try:
816
+ yield from self.raw.stream(chunk_size, decode_content=True)
817
+ except ProtocolError as e:
818
+ raise ChunkedEncodingError(e)
819
+ except DecodeError as e:
820
+ raise ContentDecodingError(e)
821
+ except ReadTimeoutError as e:
822
+ raise ConnectionError(e)
823
+ except SSLError as e:
824
+ raise RequestsSSLError(e)
825
+ else:
826
+ # Standard file-like object.
827
+ while True:
828
+ chunk = self.raw.read(chunk_size)
829
+ if not chunk:
830
+ break
831
+ yield chunk
832
+
833
+ self._content_consumed = True
834
+
835
+ if self._content_consumed and isinstance(self._content, bool):
836
+ raise StreamConsumedError()
837
+ elif chunk_size is not None and not isinstance(chunk_size, int):
838
+ raise TypeError(
839
+ f"chunk_size must be an int, it is instead a {type(chunk_size)}."
840
+ )
841
+ # simulate reading small chunks of the content
842
+ reused_chunks = iter_slices(self._content, chunk_size)
843
+
844
+ stream_chunks = generate()
845
+
846
+ chunks = reused_chunks if self._content_consumed else stream_chunks
847
+
848
+ if decode_unicode:
849
+ chunks = stream_decode_response_unicode(chunks, self)
850
+
851
+ return chunks
852
+
853
+ def iter_lines(
854
+ self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
855
+ ):
856
+ """Iterates over the response data, one line at a time. When
857
+ stream=True is set on the request, this avoids reading the
858
+ content at once into memory for large responses.
859
+
860
+ .. note:: This method is not reentrant safe.
861
+ """
862
+
863
+ pending = None
864
+
865
+ for chunk in self.iter_content(
866
+ chunk_size=chunk_size, decode_unicode=decode_unicode
867
+ ):
868
+
869
+ if pending is not None:
870
+ chunk = pending + chunk
871
+
872
+ if delimiter:
873
+ lines = chunk.split(delimiter)
874
+ else:
875
+ lines = chunk.splitlines()
876
+
877
+ if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
878
+ pending = lines.pop()
879
+ else:
880
+ pending = None
881
+
882
+ yield from lines
883
+
884
+ if pending is not None:
885
+ yield pending
886
+
887
+ @property
888
+ def content(self):
889
+ """Content of the response, in bytes."""
890
+
891
+ if self._content is False:
892
+ # Read the contents.
893
+ if self._content_consumed:
894
+ raise RuntimeError("The content for this response was already consumed")
895
+
896
+ if self.status_code == 0 or self.raw is None:
897
+ self._content = None
898
+ else:
899
+ self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
900
+
901
+ self._content_consumed = True
902
+ # don't need to release the connection; that's been handled by urllib3
903
+ # since we exhausted the data.
904
+ return self._content
905
+
906
@property
def text(self):
    """Content of the response, in unicode.

    If Response.encoding is None, encoding will be guessed using
    ``charset_normalizer`` or ``chardet``.

    The encoding of the response content is determined based solely on HTTP
    headers, following RFC 2616 to the letter. If you can take advantage of
    non-HTTP knowledge to make a better guess at the encoding, you should
    set ``r.encoding`` appropriately before accessing this property.
    """
    # An empty body decodes to the empty string regardless of charset.
    if not self.content:
        return ""

    # Prefer the declared encoding; fall back to the auto-detected one.
    codec = self.encoding if self.encoding is not None else self.apparent_encoding

    try:
        return str(self.content, codec, errors="replace")
    except (LookupError, TypeError):
        # LookupError: the codec name was unknown (misspelling or similar).
        # TypeError: the codec ended up being None.
        # Either way, fall back to a blind decode.
        return str(self.content, errors="replace")
943
+
944
def json(self, **kwargs):
    r"""Returns the json-encoded content of a response, if any.

    :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
    :raises requests.exceptions.JSONDecodeError: If the response body does not
        contain valid json.
    """

    if not self.encoding and self.content and len(self.content) > 3:
        # No encoding set. JSON RFC 4627 section 3 states we should expect
        # UTF-8, -16 or -32. Detect which one to use; If the detection or
        # decoding fails, fall back to `self.text` (using charset_normalizer to make
        # a best guess).
        encoding = guess_json_utf(self.content)
        if encoding is not None:
            try:
                return complexjson.loads(self.content.decode(encoding), **kwargs)
            except UnicodeDecodeError:
                # Wrong UTF codec detected; usually because it's not UTF-8
                # but some other 8-bit codec. This is an RFC violation,
                # and the server didn't bother to tell us what codec *was*
                # used.
                pass
            except JSONDecodeError as e:
                # Re-raise under the requests-specific exception type so
                # callers can catch a single class regardless of backend.
                raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)

    try:
        return complexjson.loads(self.text, **kwargs)
    except JSONDecodeError as e:
        # Catch JSON-related errors and raise as requests.JSONDecodeError
        # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
        raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
976
+
977
@property
def links(self):
    """Returns the parsed header links of the response, if any."""
    header = self.headers.get("link")
    if not header:
        return {}

    # Each parsed link is keyed by its "rel" attribute, falling back to
    # the URL itself when no rel is present.
    return {
        (link.get("rel") or link.get("url")): link
        for link in parse_header_links(header)
    }
993
+
994
def raise_for_status(self):
    """Raises :class:`HTTPError`, if one occurred."""

    reason = self.reason
    if isinstance(reason, bytes):
        # We attempt to decode utf-8 first because some servers
        # choose to localize their reason strings. If the string
        # isn't utf-8, we fall back to iso-8859-1 for all other
        # encodings. (See PR #3538)
        try:
            reason = reason.decode("utf-8")
        except UnicodeDecodeError:
            reason = reason.decode("iso-8859-1")

    if 400 <= self.status_code < 500:
        error_kind = "Client Error"
    elif 500 <= self.status_code < 600:
        error_kind = "Server Error"
    else:
        # Informational / success / redirect statuses never raise.
        return

    raise HTTPError(
        f"{self.status_code} {error_kind}: {reason} for url: {self.url}",
        response=self,
    )
1022
+
1023
def close(self):
    """Releases the connection back to the pool. Once this method has been
    called the underlying ``raw`` object must not be accessed again.

    *Note: Should not normally need to be called explicitly.*
    """
    # Only close the raw stream when its data was never fully consumed;
    # a fully-read response has already been released upstream.
    if not self._content_consumed:
        self.raw.close()

    # Not every raw object exposes release_conn, so probe defensively.
    releaser = getattr(self.raw, "release_conn", None)
    if releaser is not None:
        releaser()
llmeval-env/lib/python3.10/site-packages/requests/sessions.py ADDED
@@ -0,0 +1,833 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.sessions
3
+ ~~~~~~~~~~~~~~~~~
4
+
5
+ This module provides a Session object to manage and persist settings across
6
+ requests (cookies, auth, proxies).
7
+ """
8
+ import os
9
+ import sys
10
+ import time
11
+ from collections import OrderedDict
12
+ from datetime import timedelta
13
+
14
+ from ._internal_utils import to_native_string
15
+ from .adapters import HTTPAdapter
16
+ from .auth import _basic_auth_str
17
+ from .compat import Mapping, cookielib, urljoin, urlparse
18
+ from .cookies import (
19
+ RequestsCookieJar,
20
+ cookiejar_from_dict,
21
+ extract_cookies_to_jar,
22
+ merge_cookies,
23
+ )
24
+ from .exceptions import (
25
+ ChunkedEncodingError,
26
+ ContentDecodingError,
27
+ InvalidSchema,
28
+ TooManyRedirects,
29
+ )
30
+ from .hooks import default_hooks, dispatch_hook
31
+
32
+ # formerly defined here, reexposed here for backward compatibility
33
+ from .models import ( # noqa: F401
34
+ DEFAULT_REDIRECT_LIMIT,
35
+ REDIRECT_STATI,
36
+ PreparedRequest,
37
+ Request,
38
+ )
39
+ from .status_codes import codes
40
+ from .structures import CaseInsensitiveDict
41
+ from .utils import ( # noqa: F401
42
+ DEFAULT_PORTS,
43
+ default_headers,
44
+ get_auth_from_url,
45
+ get_environ_proxies,
46
+ get_netrc_auth,
47
+ requote_uri,
48
+ resolve_proxies,
49
+ rewind_body,
50
+ should_bypass_proxies,
51
+ to_key_val_list,
52
+ )
53
+
54
# Preferred clock, based on which one is more accurate on a given system.
# NOTE(review): perf_counter is presumably chosen on Windows for its finer
# resolution there; elsewhere time.time() is kept even though it is not
# monotonic — confirm before relying on elapsed-time precision.
if sys.platform == "win32":
    preferred_clock = time.perf_counter
else:
    preferred_clock = time.time
59
+
60
+
61
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session. If a
    setting is a dictionary, they will be merged together using `dict_class`
    """

    # If either side is absent, the other side wins outright.
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Non-mapping settings (e.g. verify) are never merged; the request's
    # explicit value simply takes precedence.
    if not (
        isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
    ):
        return request_setting

    # Session values form the base; request values override them.
    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))

    # A request value of None removes the session's entry. Materialize the
    # key list first so we never mutate the mapping while iterating it.
    for dead_key in [k for (k, v) in merged.items() if v is None]:
        del merged[dead_key]

    return merged
89
+
90
+
91
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """

    def _is_blank(hooks):
        # Missing, or carrying an explicitly-empty response hook list.
        return hooks is None or hooks.get("response") == []

    if _is_blank(session_hooks):
        return request_hooks
    if _is_blank(request_hooks):
        return session_hooks

    return merge_setting(request_hooks, session_hooks, dict_class)
104
+
105
+
106
class SessionRedirectMixin:
    """Redirect-following machinery shared by :class:`Session`.

    Provides redirect-target extraction, credential-stripping policy, and
    the generator that walks a redirect chain.
    """

    def get_redirect_target(self, resp):
        """Receives a Response. Returns a redirect URI or ``None``"""
        # Due to the nature of how requests processes redirects this method will
        # be called at least once upon the original response and at least twice
        # on each subsequent redirect response (if any).
        # If a custom mixin is used to handle this logic, it may be advantageous
        # to cache the redirect location onto the response object as a private
        # attribute.
        if resp.is_redirect:
            location = resp.headers["location"]
            # Currently the underlying http module on py3 decode headers
            # in latin1, but empirical evidence suggests that latin1 is very
            # rarely used with non-ASCII characters in HTTP headers.
            # It is more likely to get UTF8 header rather than latin1.
            # This causes incorrect handling of UTF8 encoded location headers.
            # To solve this, we re-encode the location in latin1.
            location = location.encode("latin1")
            return to_native_string(location, "utf8")
        return None

    def should_strip_auth(self, old_url, new_url):
        """Decide whether Authorization header should be removed when redirecting"""
        old_parsed = urlparse(old_url)
        new_parsed = urlparse(new_url)
        # Any hostname change is an unconditional strip.
        if old_parsed.hostname != new_parsed.hostname:
            return True
        # Special case: allow http -> https redirect when using the standard
        # ports. This isn't specified by RFC 7235, but is kept to avoid
        # breaking backwards compatibility with older versions of requests
        # that allowed any redirects on the same host.
        if (
            old_parsed.scheme == "http"
            and old_parsed.port in (80, None)
            and new_parsed.scheme == "https"
            and new_parsed.port in (443, None)
        ):
            return False

        # Handle default port usage corresponding to scheme.
        changed_port = old_parsed.port != new_parsed.port
        changed_scheme = old_parsed.scheme != new_parsed.scheme
        default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
        if (
            not changed_scheme
            and old_parsed.port in default_port
            and new_parsed.port in default_port
        ):
            return False

        # Standard case: root URI must match
        return changed_port or changed_scheme

    def resolve_redirects(
        self,
        resp,
        req,
        stream=False,
        timeout=None,
        verify=True,
        cert=None,
        proxies=None,
        yield_requests=False,
        **adapter_kwargs,
    ):
        """Receives a Response. Returns a generator of Responses or Requests."""

        hist = []  # keep track of history

        url = self.get_redirect_target(resp)
        previous_fragment = urlparse(req.url).fragment
        while url:
            # Each hop starts from a fresh copy so mutations never leak
            # back into the caller's PreparedRequest.
            prepared_request = req.copy()

            # Update history and keep track of redirects.
            # resp.history must ignore the original request in this loop
            hist.append(resp)
            resp.history = hist[1:]

            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)

            if len(resp.history) >= self.max_redirects:
                raise TooManyRedirects(
                    f"Exceeded {self.max_redirects} redirects.", response=resp
                )

            # Release the connection back into the pool.
            resp.close()

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith("//"):
                parsed_rurl = urlparse(resp.url)
                url = ":".join([to_native_string(parsed_rurl.scheme), url])

            # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
            parsed = urlparse(url)
            if parsed.fragment == "" and previous_fragment:
                parsed = parsed._replace(fragment=previous_fragment)
            elif parsed.fragment:
                previous_fragment = parsed.fragment
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)

            self.rebuild_method(prepared_request, resp)

            # https://github.com/psf/requests/issues/1084
            if resp.status_code not in (
                codes.temporary_redirect,
                codes.permanent_redirect,
            ):
                # https://github.com/psf/requests/issues/3490
                purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None

            headers = prepared_request.headers
            # Drop the stale Cookie header; it is rebuilt from the jar below.
            headers.pop("Cookie", None)

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            merge_cookies(prepared_request._cookies, self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # A failed tell() sets `_body_position` to `object()`. This non-None
            # value ensures `rewindable` will be True, allowing us to raise an
            # UnrewindableBodyError, instead of hanging the connection.
            rewindable = prepared_request._body_position is not None and (
                "Content-Length" in headers or "Transfer-Encoding" in headers
            )

            # Attempt to rewind consumed file-like object.
            if rewindable:
                rewind_body(prepared_request)

            # Override the original request.
            req = prepared_request

            if yield_requests:
                yield req
            else:

                resp = self.send(
                    req,
                    stream=stream,
                    timeout=timeout,
                    verify=verify,
                    cert=cert,
                    proxies=proxies,
                    allow_redirects=False,
                    **adapter_kwargs,
                )

                extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

                # extract redirect url, if any, for the next loop
                url = self.get_redirect_target(resp)
                yield resp

    def rebuild_auth(self, prepared_request, response):
        """When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if "Authorization" in headers and self.should_strip_auth(
            response.request.url, url
        ):
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            del headers["Authorization"]

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :rtype: dict
        """
        headers = prepared_request.headers
        scheme = urlparse(prepared_request.url).scheme
        new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)

        if "Proxy-Authorization" in headers:
            del headers["Proxy-Authorization"]

        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        # urllib3 handles proxy authorization for us in the standard adapter.
        # Avoid appending this to TLS tunneled requests where it may be leaked.
        # (Security-relevant: sending Proxy-Authorization on https requests
        # was CVE-2023-32681 — do not relax this condition.)
        if not scheme.startswith('https') and username and password:
            headers["Proxy-Authorization"] = _basic_auth_str(username, password)

        return new_proxies

    def rebuild_method(self, prepared_request, response):
        """When being redirected we may want to change the method of the request
        based on certain specs or browser behavior.
        """
        method = prepared_request.method

        # https://tools.ietf.org/html/rfc7231#section-6.4.4
        if response.status_code == codes.see_other and method != "HEAD":
            method = "GET"

        # Do what the browsers do, despite standards...
        # First, turn 302s into GETs.
        if response.status_code == codes.found and method != "HEAD":
            method = "GET"

        # Second, if a POST is responded to with a 301, turn it into a GET.
        # This bizarre behaviour is explained in Issue 1704.
        if response.status_code == codes.moved and method == "POST":
            method = "GET"

        prepared_request.method = method
355
+
356
+
357
class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('https://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      ...     s.get('https://httpbin.org/get')
      <Response [200]>
    """

    # Attributes captured by __getstate__/__setstate__ for pickling.
    __attrs__ = [
        "headers",
        "cookies",
        "auth",
        "proxies",
        "hooks",
        "params",
        "verify",
        "cert",
        "adapters",
        "stream",
        "trust_env",
        "max_redirects",
    ]

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        #: Defaults to `True`, requiring requests to verify the TLS certificate at the
        #: remote end.
        #: If verify is set to `False`, requests will accept any TLS certificate
        #: presented by the server, and will ignore hostname mismatches and/or
        #: expired certificates, which will make your application vulnerable to
        #: man-in-the-middle (MitM) attacks.
        #: Only set this to `False` for testing.
        self.verify = True

        #: SSL client certificate default, if String, path to ssl client
        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount("https://", HTTPAdapter())
        self.mount("http://", HTTPAdapter())

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Closing on exit releases all pooled connections.
        self.close()

    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        :rtype: requests.PreparedRequest
        """
        cookies = request.cookies or {}

        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies
        )

        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(
                request.headers, self.headers, dict_class=CaseInsensitiveDict
            ),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p

    def request(
        self,
        method,
        url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None,
    ):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use. Defaults to ``True``. When set to
            ``False``, requests will accept any TLS certificate presented by
            the server, and will ignore hostname mismatches and/or expired
            certificates, which will make your application vulnerable to
            man-in-the-middle (MitM) attacks. Setting verify to ``False``
            may be useful during local development or testing.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :rtype: requests.Response
        """
        # Create the Request.
        req = Request(
            method=method.upper(),
            url=url,
            headers=headers,
            files=files,
            data=data or {},
            json=json,
            params=params or {},
            auth=auth,
            cookies=cookies,
            hooks=hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )

        # Send the request.
        send_kwargs = {
            "timeout": timeout,
            "allow_redirects": allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)

        return resp

    def get(self, url, **kwargs):
        r"""Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        kwargs.setdefault("allow_redirects", True)
        return self.request("GET", url, **kwargs)

    def options(self, url, **kwargs):
        r"""Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        kwargs.setdefault("allow_redirects", True)
        return self.request("OPTIONS", url, **kwargs)

    def head(self, url, **kwargs):
        r"""Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        # HEAD does not follow redirects by default, matching browsers.
        kwargs.setdefault("allow_redirects", False)
        return self.request("HEAD", url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        r"""Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request("POST", url, data=data, json=json, **kwargs)

    def put(self, url, data=None, **kwargs):
        r"""Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request("PUT", url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        r"""Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request("PATCH", url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        r"""Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """

        return self.request("DELETE", url, **kwargs)

    def send(self, request, **kwargs):
        """Send a given PreparedRequest.

        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault("stream", self.stream)
        kwargs.setdefault("verify", self.verify)
        kwargs.setdefault("cert", self.cert)
        if "proxies" not in kwargs:
            kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError("You can only send PreparedRequests.")

        # Set up variables needed for resolve_redirects and dispatching of hooks
        # (allow_redirects is popped because the adapter must not see it).
        allow_redirects = kwargs.pop("allow_redirects", True)
        stream = kwargs.get("stream")
        hooks = request.hooks

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = preferred_clock()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        elapsed = preferred_clock() - start
        r.elapsed = timedelta(seconds=elapsed)

        # Response manipulation hooks
        r = dispatch_hook("response", hooks, r, **kwargs)

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Resolve redirects if allowed.
        if allow_redirects:
            # Redirect resolving generator.
            gen = self.resolve_redirects(r, request, **kwargs)
            history = [resp for resp in gen]
        else:
            history = []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        # If redirects aren't being followed, store the response on the Request for Response.next().
        if not allow_redirects:
            try:
                r._next = next(
                    self.resolve_redirects(r, request, yield_requests=True, **kwargs)
                )
            except StopIteration:
                pass

        # Eagerly consume the body unless the caller asked for streaming.
        if not stream:
            r.content

        return r

    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """
        Check the environment and merge it with some settings.

        NOTE(review): when ``trust_env`` is set, the ``proxies`` dict passed
        in is mutated in place via ``setdefault`` — confirm callers do not
        rely on it being untouched.

        :rtype: dict
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            no_proxy = proxies.get("no_proxy") if proxies is not None else None
            env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)

            # Look for requests environment configuration
            # and be compatible with cURL.
            if verify is True or verify is None:
                verify = (
                    os.environ.get("REQUESTS_CA_BUNDLE")
                    or os.environ.get("CURL_CA_BUNDLE")
                    or verify
                )

        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)

        return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}

    def get_adapter(self, url):
        """
        Returns the appropriate connection adapter for the given URL.

        :rtype: requests.adapters.BaseAdapter
        """
        # Adapters are kept ordered longest-prefix-first (see mount()), so
        # the first case-insensitive prefix match is the most specific one.
        for (prefix, adapter) in self.adapters.items():

            if url.lower().startswith(prefix.lower()):
                return adapter

        # Nothing matches :-/
        raise InvalidSchema(f"No connection adapters were found for {url!r}")

    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()

    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.

        Adapters are sorted in descending order by prefix length.
        """
        self.adapters[prefix] = adapter
        # Re-append every key shorter than the new prefix so the OrderedDict
        # stays sorted longest-prefix-first for get_adapter().
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]

        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)

    def __getstate__(self):
        # Only the attributes listed in __attrs__ are pickled.
        state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
        return state

    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
819
+
820
+
821
def session():
    """
    Returns a :class:`Session` for context-management.

    .. deprecated:: 1.0.0

        This method has been deprecated since version 1.0.0 and is only kept for
        backwards compatibility. New code should use :class:`~requests.sessions.Session`
        to create a session. This may be removed at a future date.

    :rtype: Session
    """
    # Thin factory kept solely for backwards compatibility.
    return Session()
llmeval-env/lib/python3.10/site-packages/requests/utils.py ADDED
@@ -0,0 +1,1094 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.utils
3
+ ~~~~~~~~~~~~~~
4
+
5
+ This module provides utility functions that are used within Requests
6
+ that are also useful for external consumption.
7
+ """
8
+
9
+ import codecs
10
+ import contextlib
11
+ import io
12
+ import os
13
+ import re
14
+ import socket
15
+ import struct
16
+ import sys
17
+ import tempfile
18
+ import warnings
19
+ import zipfile
20
+ from collections import OrderedDict
21
+
22
+ from urllib3.util import make_headers, parse_url
23
+
24
+ from . import certs
25
+ from .__version__ import __version__
26
+
27
+ # to_native_string is unused here, but imported here for backwards compatibility
28
+ from ._internal_utils import ( # noqa: F401
29
+ _HEADER_VALIDATORS_BYTE,
30
+ _HEADER_VALIDATORS_STR,
31
+ HEADER_VALIDATORS,
32
+ to_native_string,
33
+ )
34
+ from .compat import (
35
+ Mapping,
36
+ basestring,
37
+ bytes,
38
+ getproxies,
39
+ getproxies_environment,
40
+ integer_types,
41
+ )
42
+ from .compat import parse_http_list as _parse_list_header
43
+ from .compat import (
44
+ proxy_bypass,
45
+ proxy_bypass_environment,
46
+ quote,
47
+ str,
48
+ unquote,
49
+ urlparse,
50
+ urlunparse,
51
+ )
52
+ from .cookies import cookiejar_from_dict
53
+ from .exceptions import (
54
+ FileModeWarning,
55
+ InvalidHeader,
56
+ InvalidURL,
57
+ UnrewindableBodyError,
58
+ )
59
+ from .structures import CaseInsensitiveDict
60
+
61
# Candidate netrc filenames probed by get_netrc_auth (POSIX and Windows styles).
NETRC_FILES = (".netrc", "_netrc")

# Path to the CA bundle shipped with the certs module.
DEFAULT_CA_BUNDLE_PATH = certs.where()

# Default port per URL scheme.
DEFAULT_PORTS = {"http": 80, "https": 443}

# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)
71
+
72
+
73
if sys.platform == "win32":
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Check the Windows registry ProxyOverride list for *host*.

        :rtype: bool
        """
        try:
            import winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
            )
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
        except (OSError, ValueError):
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(";")
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == "<local>":
                # '<local>' means bypass for plain hostnames (no dot).
                if "." not in host:
                    return True
            # Translate the registry glob into a regular expression.
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")  # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
122
+
123
+
124
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mappings are converted to their items view; everything else passes through.
    return d.items() if hasattr(d, "items") else d
131
+
132
+
133
def super_len(o):
    """Best-effort count of the bytes remaining to be read from *o*.

    Tries, in order: ``__len__``, a ``len`` attribute, ``fileno()``/``fstat``
    and finally ``seek``/``tell``. The current read position (``tell``) is
    subtracted so partially consumed file-like objects report only what is
    left. Returns 0 when no length can be determined.
    """
    total_length = None
    current_position = 0

    if hasattr(o, "__len__"):
        total_length = len(o)

    elif hasattr(o, "len"):
        # e.g. urllib3 / requests-toolbelt style body objects expose .len
        total_length = o.len

    elif hasattr(o, "fileno"):
        try:
            fileno = o.fileno()
        except (io.UnsupportedOperation, AttributeError):
            # AttributeError is a surprising exception, seeing as how we've just checked
            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
            # `Tarfile.extractfile()`, per issue 5229.
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if "b" not in o.mode:
                warnings.warn(
                    (
                        "Requests has determined the content-length for this "
                        "request using the binary size of the file: however, the "
                        "file has been opened in text mode (i.e. without the 'b' "
                        "flag in the mode). This may lead to an incorrect "
                        "content-length. In Requests 3.0, support will be removed "
                        "for files in text mode."
                    ),
                    FileModeWarning,
                )

    if hasattr(o, "tell"):
        try:
            current_position = o.tell()
        except OSError:
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, "seek") and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except OSError:
                    total_length = 0

    if total_length is None:
        total_length = 0

    # Never report a negative remaining length.
    return max(0, total_length - current_position)
197
+
198
+
199
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    :param url: URL whose hostname is looked up in the netrc file.
    :param raise_errors: when True, re-raise netrc parse/read errors instead
        of silently skipping netrc authentication.
    :rtype: tuple or None
    """

    # $NETRC overrides the default candidate locations.
    netrc_file = os.environ.get("NETRC")
    if netrc_file is not None:
        netrc_locations = (netrc_file,)
    else:
        netrc_locations = (f"~/{f}" for f in NETRC_FILES)

    try:
        from netrc import NetrcParseError, netrc

        netrc_path = None

        for f in netrc_locations:
            try:
                loc = os.path.expanduser(f)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b":"
        if isinstance(url, str):
            splitstr = splitstr.decode("ascii")
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = 0 if _netrc[0] else 1
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, OSError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass
254
+
255
+
256
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    # Pseudo-names such as "<stdin>" are wrapped in angle brackets and do
    # not correspond to real filesystem paths, so they are rejected.
    name = getattr(obj, "name", None)
    if not name or not isinstance(name, basestring):
        return None
    if name[:1] == "<" or name[-1:] == ">":
        return None
    return os.path.basename(name)
261
+
262
+
263
def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.

    :rtype: str
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        if not prefix:
            # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
            # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
            break
        # Zip member names always use forward slashes, regardless of OS.
        member = "/".join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    extracted_path = os.path.join(tmp, member.split("/")[-1])
    if not os.path.exists(extracted_path):
        # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path
298
+
299
+
300
@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion"""
    # Stage the data in a temp file in the same directory, then rename it
    # into place so readers never observe a partially written file.
    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(tmp_descriptor, "wb") as staging_handle:
            yield staging_handle
        os.replace(tmp_name, filename)
    except BaseException:
        # On any failure (including in the caller's with-body), discard the
        # staged file and propagate.
        os.remove(tmp_name)
        raise
311
+
312
+
313
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    # Scalars look iterable (str/bytes) or are plainly wrong (bool/int);
    # reject them explicitly rather than letting OrderedDict fail obscurely.
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    return OrderedDict(value) if value is not None else None
338
+
339
+
340
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as key/value pairs.
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    pairs = value.items() if isinstance(value, Mapping) else value
    return list(pairs)
367
+
368
+
369
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    # Quoted-string elements are unwrapped and unescaped; bare tokens pass
    # through untouched.
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
399
+
400
+
401
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    parsed = {}
    for item in _parse_list_header(value):
        name, sep, raw = item.partition("=")
        if not sep:
            # Bare token: key with no value.
            parsed[item] = None
            continue
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        parsed[name] = raw
    return parsed
434
+
435
+
436
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :param is_filename: leave UNC filename paths (``\\server\share``) intact.
    :rtype: str
    """
    # Anything not wrapped in double quotes passes through untouched.
    if not value or value[0] != '"' or value[-1] != '"':
        return value

    inner = value[1:-1]

    # if this is a filename and the starting characters look like
    # a UNC path, then just return the value without quotes. Using the
    # replace sequence below on a UNC path has the effect of turning
    # the leading double slash into a single slash and then
    # _fix_ie_filename() doesn't work correctly. See #458.
    if is_filename and inner[:2] == "\\\\":
        return inner

    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well. IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    return inner.replace("\\\\", "\\").replace('\\"', '"')
460
+
461
+
462
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    # Later cookies with the same name overwrite earlier ones, exactly as
    # a name-keyed loop would.
    return {cookie.name: cookie.value for cookie in cj}
475
+
476
+
477
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """

    # Delegates to cookiejar_from_dict (cookies module) to merge the dict
    # into the supplied jar.
    return cookiejar_from_dict(cookie_dict, cj)
486
+
487
+
488
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn(
        (
            "In requests 3.0, get_encodings_from_content will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # Probe, in order: <meta charset=...>, http-equiv pragma, and XML prolog.
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
511
+
512
+
513
+ def _parse_content_type_header(header):
514
+ """Returns content type and parameters from given header
515
+
516
+ :param header: string
517
+ :return: tuple containing content type and dictionary of
518
+ parameters
519
+ """
520
+
521
+ tokens = header.split(";")
522
+ content_type, params = tokens[0].strip(), tokens[1:]
523
+ params_dict = {}
524
+ items_to_strip = "\"' "
525
+
526
+ for param in params:
527
+ param = param.strip()
528
+ if param:
529
+ key, value = param, True
530
+ index_of_equals = param.find("=")
531
+ if index_of_equals != -1:
532
+ key = param[:index_of_equals].strip(items_to_strip)
533
+ value = param[index_of_equals + 1 :].strip(items_to_strip)
534
+ params_dict[key.lower()] = value
535
+ return content_type, params_dict
536
+
537
+
538
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    raw = headers.get("content-type")
    if not raw:
        return None

    content_type, params = _parse_content_type_header(raw)

    if "charset" in params:
        # Explicit charset parameter wins; drop any surrounding quotes.
        return params["charset"].strip("'\"")

    if "text" in content_type:
        # HTTP default for text/* when no charset is declared.
        return "ISO-8859-1"

    if "application/json" in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return "utf-8"

    return None
561
+
562
+
563
def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator."""
    encoding = r.encoding
    if encoding is None:
        # No declared encoding: pass chunks through unchanged.
        yield from iterator
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
    for piece in iterator:
        decoded = decoder.decode(piece)
        if decoded:
            yield decoded

    # Flush whatever the incremental decoder still has buffered.
    tail = decoder.decode(b"", final=True)
    if tail:
        yield tail
578
+
579
+
580
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    # A missing or non-positive length means "one slice with everything".
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    if string:
        for start in range(0, len(string), slice_length):
            yield string[start : start + slice_length]
588
+
589
+
590
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn(
        (
            "In requests 3.0, get_unicode_from_response will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # NOTE(review): tried_encodings is appended to but never read.
    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        # str() raises TypeError when encoding is None; in that case return
        # the raw bytes unchanged.
        return str(r.content, encoding, errors="replace")
    except TypeError:
        return r.content
627
+
628
+
629
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
)


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    pieces = uri.split("%")
    # Everything after each '%' starts with (at most) a two-char hex pair.
    rebuilt = [pieces[0]]
    for piece in pieces[1:]:
        hex_pair = piece[:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL(f"Invalid percent-escape sequence: '{hex_pair}'")

            if char in UNRESERVED_SET:
                rebuilt.append(char + piece[2:])
            else:
                rebuilt.append(f"%{piece}")
        else:
            rebuilt.append(f"%{piece}")
    return "".join(rebuilt)
657
+
658
+
659
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    try:
        # Unquote only the unreserved characters
        # Then quote only illegal characters (do not quote reserved,
        # unreserved, or '%')
        unquoted = unquote_unreserved(uri)
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
    return quote(unquoted, safe="!#$%&'()*+,/:;=?@[]~")
679
+
680
+
681
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    netaddr, bits = net.split("/")
    # Build the netmask in network byte order, then reinterpret it natively —
    # equivalent to unpacking inet_aton(dotted_netmask(bits)).
    mask_value = 0xFFFFFFFF ^ (1 << 32 - int(bits)) - 1
    netmask = struct.unpack("=L", struct.pack(">I", mask_value))[0]
    ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)
694
+
695
+
696
def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # Shift a solid run of ones into the top `mask` bits, truncated to 32 bits.
    bits = (0xFFFFFFFF << (32 - mask)) & 0xFFFFFFFF
    return socket.inet_ntoa(struct.pack(">I", bits))
705
+
706
+
707
def is_ipv4_address(string_ip):
    """
    :rtype: bool
    """
    # inet_aton raises OSError for anything it cannot parse as IPv4.
    try:
        socket.inet_aton(string_ip)
        return True
    except OSError:
        return False
716
+
717
+
718
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    # Exactly one slash separates the network address from the prefix length.
    if string_network.count("/") != 1:
        return False

    host_part, mask_part = string_network.split("/")

    try:
        mask = int(mask_part)
    except ValueError:
        return False

    if mask < 1 or mask > 32:
        return False

    try:
        socket.inet_aton(host_part)
    except OSError:
        return False

    return True
740
+
741
+
742
@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    if value is None:
        # Nothing to set or restore.
        yield
        return

    previous = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        if previous is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = previous
762
+
763
+
764
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :param url: URL being requested.
    :param no_proxy: comma-separated host list; falls back to the
        ``no_proxy``/``NO_PROXY`` environment variables when None.
    :rtype: bool
    """

    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    def get_proxy(key):
        return os.environ.get(key) or os.environ.get(key.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy("no_proxy")
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)

        if is_ipv4_address(parsed.hostname):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += f":{parsed.port}"

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # Defer to the platform-level proxy_bypass, with the caller's no_proxy
    # temporarily mirrored into the environment so it is taken into account.
    with set_environ("no_proxy", no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
822
+
823
+
824
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :param url: URL used (via the no_proxy rules) to decide whether the
        environment proxies apply at all.
    :param no_proxy: optional comma-separated host list overriding the
        environment's no_proxy setting.
    :rtype: dict
    """
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    else:
        return getproxies()
834
+
835
+
836
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)

    if parts.hostname is None:
        # No hostname (e.g. file URIs): fall back to scheme, then "all".
        return proxies.get(parts.scheme, proxies.get("all"))

    # Most specific key wins: scheme+host, scheme, all+host, all.
    candidates = (
        parts.scheme + "://" + parts.hostname,
        parts.scheme,
        "all://" + parts.hostname,
        "all",
    )
    for candidate in candidates:
        if candidate in proxies:
            return proxies[candidate]
    return None
860
+
861
+
862
def resolve_proxies(request, proxies, trust_env=True):
    """This method takes proxy information from a request and configuration
    input to resolve a mapping of target proxies. This will consider settings
    such a NO_PROXY to strip proxy configurations.

    :param request: Request or PreparedRequest
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    :param trust_env: Boolean declaring whether to trust environment configs

    :rtype: dict
    """
    proxies = proxies if proxies is not None else {}
    url = request.url
    scheme = urlparse(url).scheme
    no_proxy = proxies.get("no_proxy")
    new_proxies = proxies.copy()

    if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
        environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)

        proxy = environ_proxies.get(scheme, environ_proxies.get("all"))

        if proxy:
            # Environment proxy is only a fallback: an explicit per-scheme
            # entry supplied by the caller is never overwritten.
            new_proxies.setdefault(scheme, proxy)
    return new_proxies
887
+
888
+
889
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    # e.g. "python-requests/<version>"
    return "/".join((name, __version__))
896
+
897
+
898
def default_headers():
    """
    :rtype: requests.structures.CaseInsensitiveDict
    """
    # Baseline header set sent with every request unless overridden.
    headers = {
        "User-Agent": default_user_agent(),
        "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
        "Accept": "*/*",
        "Connection": "keep-alive",
    }
    return CaseInsensitiveDict(headers)
910
+
911
+
912
def parse_header_links(value):
    """Return a list of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    strip_set = " '\""

    value = value.strip(strip_set)
    if not value:
        return []

    parsed = []
    for entry in re.split(", *<", value):
        url, _, params = entry.partition(";")

        link = {"url": url.strip("<> '\"")}

        # Parameters must be simple key=value pairs; anything else (no "=",
        # or more than one "=") terminates parameter parsing for this link.
        for param in params.split(";"):
            pieces = param.split("=")
            if len(pieces) != 2:
                break
            key, val = pieces
            link[key.strip(strip_set)] = val.strip(strip_set)

        parsed.append(link)

    return parsed
947
+
948
+
949
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """
    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]

    # Explicit byte-order marks take priority.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return "utf-32"  # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return "utf-16"  # BOM included

    nulls = sample.count(_null)
    if nulls == 0:
        return "utf-8"
    if nulls == 2:
        if sample[::2] == _null2:  # 1st and 3rd are null
            return "utf-16-be"
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return "utf-16-le"
        # Did not detect 2 valid UTF-16 ascii-range characters
    elif nulls == 3:
        if sample[:3] == _null3:
            return "utf-32-be"
        if sample[1:] == _null3:
            return "utf-32-le"
        # Did not detect a valid UTF-32 ascii-range character
    return None
985
+
986
+
987
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = parse_url(url)

    # A defect in urlparse determines that there isn't a netloc present in some
    # urls. We previously assumed parsing was overly cautious, and swapped the
    # netloc and path. Due to a lack of tests on the original defect, this is
    # maintained with parse_url for backwards compatibility.
    netloc, path = parsed.netloc, parsed.path
    if not netloc:
        netloc, path = path, netloc

    if parsed.auth:
        # parse_url doesn't provide the netloc with auth
        # so we'll add it ourselves.
        netloc = "@".join([parsed.auth, netloc])

    scheme = new_scheme if parsed.scheme is None else parsed.scheme
    if path is None:
        path = ""

    return urlunparse((scheme, netloc, path, "", parsed.query, parsed.fragment))
1014
+
1015
+
1016
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parts = urlparse(url)
    try:
        # unquote raises TypeError on None (no credentials present) and
        # AttributeError on malformed parse results; both mean "no auth".
        return (unquote(parts.username), unquote(parts.password))
    except (AttributeError, TypeError):
        return ("", "")
1030
+
1031
+
1032
def check_header_validity(header):
    """Verifies that header parts don't contain leading whitespace
    reserved characters, or return characters.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    # Index 0 selects the name validator, index 1 the value validator.
    for index, part in enumerate((name, value)):
        _validate_header_part(header, part, index)
1041
+
1042
+
1043
def _validate_header_part(header, header_part, header_validator_index):
    """Validate one half of a header tuple against the matching regex.

    :param header: the full (name, value) tuple, used only for error messages.
    :param header_part: the name (index 0) or value (index 1) being checked.
    :param header_validator_index: 0 for the name validator, 1 for the value
        validator.
    :raises InvalidHeader: if the part has the wrong type or fails validation.
    """
    if isinstance(header_part, str):
        validator = _HEADER_VALIDATORS_STR[header_validator_index]
    elif isinstance(header_part, bytes):
        validator = _HEADER_VALIDATORS_BYTE[header_validator_index]
    else:
        raise InvalidHeader(
            f"Header part ({header_part!r}) from {header} "
            f"must be of type str or bytes, not {type(header_part)}"
        )

    if not validator.match(header_part):
        header_kind = "name" if header_validator_index == 0 else "value"
        # Fixed: the two adjacent f-strings previously concatenated to
        # "...or returncharacter(s)..." (missing space after "return").
        raise InvalidHeader(
            f"Invalid leading whitespace, reserved character(s), or return "
            f"character(s) in header {header_kind}: {header_part!r}"
        )
1060
+
1061
+
1062
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # Drop any user:password@ prefix from the network location.
    netloc = netloc.rsplit("@", 1)[-1]

    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query, ""))
1077
+
1078
+
1079
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.

    :raises UnrewindableBodyError: if the body has no ``seek`` method, no
        recorded integer position, or seeking fails.
    """
    body_seek = getattr(prepared_request.body, "seek", None)
    # Guard clause: without a seek method and a recorded integer position
    # there is nothing we can rewind to.
    if body_seek is None or not isinstance(
        prepared_request._body_position, integer_types
    ):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
    try:
        body_seek(prepared_request._body_position)
    except OSError:
        raise UnrewindableBodyError(
            "An error occurred when rewinding request body for redirect."
        )
llmeval-env/lib/python3.10/site-packages/rouge_score/create_pyrouge_files.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """For creating files from {target,prediction}.txt that can be processed
16
+ by pyrouge to compare with scores in scoring_test.py.
17
+
18
+ create_pyrouge_files -- --testdata_dir=`pwd`/testdata
19
+
20
+ # testConfidenceIntervalsAgainstRouge155WithStemming result
21
+ pyrouge_evaluate_plain_text_files \
22
+ -s /tmp/lkj -sfp "prediction.(.*).txt" \
23
+ -m /tmp/lkj -mfp target.#ID#.txt
24
+
25
+ pyrouge_evaluate_plain_text_files \
26
+ -s /tmp/lkj -sfp "prediction_multi.(.*).txt" \
27
+ -m /tmp/lkj -mfp target_multi.#ID#.txt
28
+ """
29
+
30
+ from __future__ import absolute_import
31
+ from __future__ import division
32
+ from __future__ import print_function
33
+
34
+ import os
35
+
36
+ from absl import app
37
+ from absl import flags
38
+
39
FLAGS = flags.FLAGS

# Where the {target,prediction}_large.txt fixtures are read from.
flags.DEFINE_string('testdata_dir', '', 'testdata path')
# Where the per-example files are written. Fixed: the help text was a
# copy-paste of the testdata_dir help ('testdata path').
flags.DEFINE_string('output', '/tmp/lkj', 'output directory path')
43
+
44
+
45
def main(argv):
  """Writes per-example target/prediction files consumable by pyrouge.

  Args:
    argv: Positional command-line arguments; only the program name is allowed.

  Raises:
    app.UsageError: If extra positional arguments are passed.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # One line per target
  with open(os.path.join(FLAGS.testdata_dir, 'target_large.txt')) as f:
    targets = f.readlines()
  with open(os.path.join(FLAGS.testdata_dir, 'prediction_large.txt')) as f:
    predictions = f.readlines()

  def write_files(prefix, items):
    # One file per item, named <prefix>.<index>.txt.
    for i, t in enumerate(items):
      out = '%s.%d.txt' % (prefix, i)
      with open(os.path.join(FLAGS.output, out), 'w') as f:
        f.write(t)
  write_files('target', targets)
  write_files('prediction', predictions)

  # Delete this block
  def write_files2(prefix, items):
    # Groups items four lines per file, named <prefix>.<index>.txt.
    index = 0
    f = None
    for i, t in enumerate(items):
      # Write 4 lines per file
      if i % 4 == 0:
        if f:
          f.close()
        f = open(
            os.path.join(FLAGS.output, '%s.%d.txt' % (prefix, index)),
            'w')
        index += 1
      f.write(t)
    # Fixed: guard against empty `items`, where no file was ever opened and
    # the unconditional f.close() raised AttributeError on None.
    if f:
      f.close()
  write_files2('target_multi', targets)
  write_files2('prediction_multi', predictions)
79
+ write_files2('prediction_multi', predictions)
80
+
81
+
82
if __name__ == '__main__':
  # absl parses flags before dispatching to main().
  app.run(main)
llmeval-env/lib/python3.10/site-packages/rouge_score/rouge.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ r"""Main routine to calculate ROUGE scores across text files.
16
+
17
+ Designed to replicate scores computed by the ROUGE perl implementation as
18
+ closely as possible.
19
+
20
+ Output is a text file in CSV format.
21
+
22
+ Sample usage:
23
+
24
+ rouge --rouge_types=rouge1,rouge2,rougeL \
25
+ --target_filepattern=*.targets \
26
+ --prediction_filepattern=*.decodes \
27
+ --output_filename=scores.csv \
28
+ --use_stemmer
29
+
30
+ Which is equivalent to calling the perl ROUGE script as:
31
+
32
+ ROUGE-1.5.5.pl -m -e ./data -n 2 -a /tmp/rouge/settings.xml
33
+
34
+ Where settings.xml provides target and decode text.
35
+ """
36
+
37
+ from __future__ import absolute_import
38
+ from __future__ import division
39
+ from __future__ import print_function
40
+
41
+ from absl import app
42
+ from absl import flags
43
+ from rouge_score import io
44
+ from rouge_score import rouge_scorer
45
+ from rouge_score import scoring
46
+
47
# Command-line flags: input/output locations plus scoring options that mirror
# RougeScorer's constructor arguments.
flags.DEFINE_string("target_filepattern", None,
                    "Files containing target text.")
flags.DEFINE_string("prediction_filepattern", None,
                    "Files containing prediction text.")
flags.DEFINE_string("output_filename", None,
                    "File in which to write calculated ROUGE scores as a CSV.")
flags.DEFINE_string("delimiter", "\n",
                    "Record delimiter  in files.")
flags.DEFINE_list("rouge_types", ["rouge1", "rouge2", "rougeL"],
                  "List of ROUGE types to calculate.")
flags.DEFINE_boolean("use_stemmer", False,
                     "Whether to use Porter stemmer to remove common suffixes.")
flags.DEFINE_boolean("aggregate", True,
                     "Write aggregates if this is set to True")
flags.DEFINE_boolean("split_summaries", False,
                     ("Whether to split references and candidates into"
                      " sentences before computing RougeLsum."))

FLAGS = flags.FLAGS
66
+
67
+
68
def main(argv):
  """Scores prediction files against target files and writes a CSV report."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")

  scorer = rouge_scorer.RougeScorer(
      FLAGS.rouge_types,
      use_stemmer=FLAGS.use_stemmer,
      split_summaries=FLAGS.split_summaries)
  # Aggregation (bootstrap confidence intervals) is optional.
  if FLAGS.aggregate:
    aggregator = scoring.BootstrapAggregator()
  else:
    aggregator = None
  io.compute_scores_and_write_to_csv(
      FLAGS.target_filepattern,
      FLAGS.prediction_filepattern,
      FLAGS.output_filename,
      scorer,
      aggregator,
      delimiter=FLAGS.delimiter)
83
+
84
+
85
if __name__ == "__main__":
  # These flags have no usable defaults, so make absl enforce them.
  for _required_flag in ("target_filepattern", "prediction_filepattern",
                         "output_filename"):
    flags.mark_flag_as_required(_required_flag)
  app.run(main)
llmeval-env/lib/python3.10/site-packages/rouge_score/rouge_scorer_test.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Tests for rouge scorer.
16
+
17
+ Tests for both correctness and for consistency with the official ROUGE-1.5.5
18
+ implementation.
19
+
20
+ "Ground truth" scores are taken from manual runs of ROUGE-1.5.5.
21
+ """
22
+
23
+ from __future__ import absolute_import
24
+ from __future__ import division
25
+ from __future__ import print_function
26
+
27
+ import os
28
+
29
+ from absl.testing import absltest
30
+ from absl.testing import parameterized
31
+ from rouge_score import rouge_scorer
32
+ from rouge_score import test_util
33
+ from rouge_score import tokenizers
34
+
35
+
36
class RougeScorerTest(parameterized.TestCase):
  """Tests RougeScorer for correctness and consistency with ROUGE-1.5.5.

  The "AgainstRouge155" tests pin scores to values obtained from manual runs
  of the perl ROUGE-1.5.5 implementation.
  """

  def setUp(self):
    super(RougeScorerTest, self).setUp()
    # Fixture files hold one example per line; targets[i] pairs with
    # predictions[i].
    with open(test_util.TARGETS_FILE) as f:
      self.targets = f.readlines()
    with open(test_util.PREDICTIONS_FILE) as f:
      self.predictions = f.readlines()

  @parameterized.parameters(["rougen", "rouge0", "rouge10"])
  def testInvalidRougeTypes(self, rouge_type):
    # Unknown rouge type names must be rejected with ValueError.
    with self.assertRaises(ValueError):
      scorer = rouge_scorer.RougeScorer([rouge_type])
      scorer.score("testing one two", "testing")

  @parameterized.parameters(["rouge1", "rouge9", "rougeL", "rougeLsum"])
  def testValidRougeTypes(self, rouge_type):
    scorer = rouge_scorer.RougeScorer([rouge_type])
    result = scorer.score("testing one two", "testing")
    self.assertSameElements(list(result.keys()), [rouge_type])

  def testRouge1(self):
    scorer = rouge_scorer.RougeScorer(["rouge1"])
    result = scorer.score("testing one two", "testing")
    # 1/1 prediction unigrams match; 1/3 target unigrams are recalled.
    self.assertAlmostEqual(1, result["rouge1"].precision)
    self.assertAlmostEqual(1 / 3, result["rouge1"].recall)
    self.assertAlmostEqual(1 / 2, result["rouge1"].fmeasure)

  def testRouge1Multi(self):
    # score_multi with a single reference must match plain score.
    scorer = rouge_scorer.RougeScorer(["rouge1"])
    result = scorer.score_multi(["testing one two"], "testing")
    self.assertAlmostEqual(1, result["rouge1"].precision)
    self.assertAlmostEqual(1 / 3, result["rouge1"].recall)
    self.assertAlmostEqual(1 / 2, result["rouge1"].fmeasure)

  def testRougeAllMulti(self):
    # With several references, the best score per type is reported.
    scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"])
    result = scorer.score_multi(["first text", "first something"], "text first")
    self.assertAlmostEqual(1, result["rouge1"].fmeasure)
    self.assertAlmostEqual(0, result["rouge2"].fmeasure)
    self.assertAlmostEqual(0.5, result["rougeL"].fmeasure)

  @parameterized.parameters(["rouge1", "rouge2", "rougeL", "rougeLsum"])
  def testRougeEmpty(self, rouge_type):
    # An empty prediction yields all-zero scores rather than an error.
    scorer = rouge_scorer.RougeScorer([rouge_type])
    result = scorer.score("testing one two", "")
    self.assertAlmostEqual(0, result[rouge_type].precision)
    self.assertAlmostEqual(0, result[rouge_type].recall)
    self.assertAlmostEqual(0, result[rouge_type].fmeasure)

  def testRouge2(self):
    scorer = rouge_scorer.RougeScorer(["rouge2"])
    result = scorer.score("testing one two", "testing one")
    self.assertAlmostEqual(1, result["rouge2"].precision)
    self.assertAlmostEqual(1 / 2, result["rouge2"].recall)
    self.assertAlmostEqual(2 / 3, result["rouge2"].fmeasure)

  def testRougeLConsecutive(self):
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score("testing one two", "testing one")
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testRougeLNonConsecutive(self):
    # LCS does not require adjacency: "testing ... two" still has length 2.
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score("testing one two", "testing two")
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testMultipleRougeTypes(self):
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"])
    result = scorer.score("testing one two", "testing one")
    self.assertSameElements(list(result.keys()), ["rouge1", "rougeL"])
    self.assertAlmostEqual(1, result["rouge1"].precision)
    self.assertAlmostEqual(2 / 3, result["rouge1"].recall)
    self.assertAlmostEqual(4 / 5, result["rouge1"].fmeasure)
    self.assertAlmostEqual(1, result["rougeL"].precision)
    self.assertAlmostEqual(2 / 3, result["rougeL"].recall)
    self.assertAlmostEqual(4 / 5, result["rougeL"].fmeasure)

  def testRouge1AgainstRouge155(self):
    # Golden values from a manual ROUGE-1.5.5 run (no stemming).
    scorer = rouge_scorer.RougeScorer(["rouge1"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rouge1"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.40476, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.65385, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.50000, result["rouge1"].fmeasure, 5)

  def testRouge1AgainstRouge155WithStemming(self):
    # Golden values from a manual ROUGE-1.5.5 run with the -m (stemming) flag.
    scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rouge1"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.42857, result["rouge1"].recall, 5)
    self.assertAlmostEqual(0.69231, result["rouge1"].precision, 5)
    self.assertAlmostEqual(0.52941, result["rouge1"].fmeasure, 5)

  def testRouge2AgainstRouge155(self):
    scorer = rouge_scorer.RougeScorer(["rouge2"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.30769, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.53333, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.39024, result["rouge2"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.29268, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.48000, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.36364, result["rouge2"].fmeasure, 5)

  def testRouge2AgainstRouge155WithStemming(self):
    scorer = rouge_scorer.RougeScorer(["rouge2"], use_stemmer=True)
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.30769, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.53333, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.39024, result["rouge2"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.29268, result["rouge2"].recall, 5)
    self.assertAlmostEqual(0.48000, result["rouge2"].precision, 5)
    self.assertAlmostEqual(0.36364, result["rouge2"].fmeasure, 5)

  def testRougeLAgainstRouge155(self):
    scorer = rouge_scorer.RougeScorer(["rougeL"])
    result = scorer.score(self.targets[0], self.predictions[0])
    self.assertAlmostEqual(0.40741, result["rougeL"].recall, 5)
    self.assertAlmostEqual(0.68750, result["rougeL"].precision, 5)
    self.assertAlmostEqual(0.51163, result["rougeL"].fmeasure, 5)
    result = scorer.score(self.targets[1], self.predictions[1])
    self.assertAlmostEqual(0.40476, result["rougeL"].recall, 5)
    self.assertAlmostEqual(0.65385, result["rougeL"].precision, 5)
    self.assertAlmostEqual(0.50000, result["rougeL"].fmeasure, 5)

  def testRougeLSumAgainstRouge155WithStemming(self):
    scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)

    # Multi-sentence fixtures generated by create_pyrouge_files.py.
    target = test_util.get_text(
        os.path.join(test_util.PYROUGE_DIR, "target_multi.0.txt"))
    prediction = test_util.get_text(
        os.path.join(test_util.PYROUGE_DIR, "prediction_multi.0.txt"))
    result = scorer.score(target, prediction)

    self.assertAlmostEqual(0.36538, result["rougeLsum"].recall, places=5)
    self.assertAlmostEqual(0.66667, result["rougeLsum"].precision, places=5)
    self.assertAlmostEqual(0.47205, result["rougeLsum"].fmeasure, places=5)

  def testRougeLSumSentenceSplitting(self):
    # With newline-separated sentences, order does not matter for rougeLsum.
    scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)

    target = "First sentence.\nSecond Sentence."
    prediction = "Second sentence.\nFirst Sentence."
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(1.0, result["rougeLsum"].fmeasure, places=5)

    scorer = rouge_scorer.RougeScorer(["rougeLsum"],
                                      use_stemmer=True,
                                      split_summaries=False)
    result = scorer.score(target, prediction)
    # NOTE(review): the result above is never asserted and is overwritten
    # below -- presumably leftover scaffolding; confirm intent.

    # Without newlines, summaries are treated as single sentences.
    target = target.replace("\n", " ")
    prediction = prediction.replace("\n", " ")
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(0.50, result["rougeLsum"].fmeasure, places=5)

    # Split summaries into sentences using nltk
    scorer = rouge_scorer.RougeScorer(["rougeLsum"],
                                      use_stemmer=True,
                                      split_summaries=True)
    result = scorer.score(target, prediction)

    self.assertAlmostEqual(1.0, result["rougeLsum"].fmeasure, places=5)

  def testLcsTable(self):
    # The DP table's bottom-right cell holds the LCS length.
    ref = [1, 2, 3, 4, 5]
    c1 = [2, 5, 3, 4]
    t = rouge_scorer._lcs_table(ref, c1)
    self.assertEqual(3, t[len(ref)][len(c1)])
    def _read_lcs(t, ref, can):
      return rouge_scorer._backtrack_norec(t, ref, can)
    # Indices
    self.assertEqual([1, 2, 3],
                     _read_lcs(t, ref, c1))
    # Values
    self.assertEqual([2, 3, 4],
                     [ref[i] for i in _read_lcs(t, ref, c1)])

    # No common subsequence.
    c2 = [8, 9]
    t = rouge_scorer._lcs_table(ref, c2)
    self.assertEqual(0, t[len(ref)][len(c2)])
    self.assertEqual([],
                     _read_lcs(t, ref, c2))

  def testUnionLcs(self):
    # Example in Section 3.2 of https://www.aclweb.org/anthology/W04-1013,
    # except using indices into ref.

    # First test helper.
    lcs1 = [0, 1]  # lcs [1, 2]
    lcs2 = [0, 2, 4]
    self.assertEqual([0, 1, 2, 4], rouge_scorer._find_union([lcs1, lcs2]))
    self.assertEqual([0, 1, 2, 4], rouge_scorer._find_union([lcs2, lcs1]))

    ref = [1, 2, 3, 4, 5]
    c1 = [1, 2, 6, 7, 8]  # lcs = [1, 2]
    c2 = [1, 3, 8, 9, 5]  # lcs = [1, 3, 5]
    self.assertEqual([1, 2, 3, 5],
                     rouge_scorer._union_lcs(ref, [c1, c2]))
    self.assertEqual([1, 2, 3, 5],
                     rouge_scorer._union_lcs(ref, [c1, c2]))

  def testSummaryLevelLcs(self):
    refs = [
        [1, 2, 3, 4, 5]
    ]
    cans = [
        [1, 2, 6, 7, 8],  # lcs = [1, 2]
        [1, 3, 8, 9, 5]   # lcs = [1, 3, 5]
    ]
    score = rouge_scorer._summary_level_lcs(refs, cans)
    self.assertEqual(0.8, score.recall)  # 4 / 5
    self.assertEqual(0.4, score.precision)  # 4 / 10
    # 0.4*0.8 / (0.4 + 0.8)
    self.assertAlmostEqual(0.5333, score.fmeasure, places=3)

    # Tokenizer may drop all tokens, resulting in empty candidate list.
    score = rouge_scorer._summary_level_lcs([["reference"]], [[]])
    self.assertEqual(0.0, score.recall)

  def testRougeLsum(self):
    scorer = rouge_scorer.RougeScorer(["rougeLsum"])
    result = scorer.score("w1 w2 w3 w4 w5", "w1 w2 w6 w7 w8\nw1 w3 w8 w9 w5")
    self.assertAlmostEqual(0.8, result["rougeLsum"].recall)
    self.assertAlmostEqual(0.4, result["rougeLsum"].precision)
    self.assertAlmostEqual(0.5333, result["rougeLsum"].fmeasure, places=3)

    # Empty case
    result = scorer.score("w1 w2 w3 w4 w5", "")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)

    result = scorer.score("", "w1")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)

    # Case in which summary is all non-word characters.
    result = scorer.score("w1 w2 w3 w4 w5", "/")
    self.assertAlmostEqual(0.0, result["rougeLsum"].fmeasure, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].recall, places=3)
    self.assertAlmostEqual(0.0, result["rougeLsum"].precision, places=3)

  def testRougeLsumLarge(self):
    with open(test_util.LARGE_PREDICTIONS_FILE) as f:
      prediction = f.read()
    with open(test_util.LARGE_TARGETS_FILE) as f:
      target = f.read()
    scorer = rouge_scorer.RougeScorer(["rougeLsum"])
    result = scorer.score(target, prediction)
    self.assertAlmostEqual(0.533, result["rougeLsum"].fmeasure, places=3)

  def testRougeTokenizerInit(self):
    # Passing an explicit tokenizer must be accepted by the constructor.
    scorer = rouge_scorer.RougeScorer(["rouge1"],
                                      tokenizer=tokenizers.DefaultTokenizer())

    target = "this is a test"
    prediction = target
    result = scorer.score(target, prediction)
    self.assertEqual(1.0, result["rouge1"].fmeasure)
311
+
312
+
313
+ if __name__ == "__main__":
314
+ absltest.main()
llmeval-env/lib/python3.10/site-packages/rouge_score/scoring.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Library for scoring and evaluation of text samples.
16
+
17
+ Aggregation functions use bootstrap resampling to compute confidence intervals
18
+ as per the original ROUGE perl implementation.
19
+ """
20
+
21
+ from __future__ import absolute_import
22
+ from __future__ import division
23
+ from __future__ import print_function
24
+
25
+ import abc
26
+ import collections
27
+ from typing import Dict
28
+
29
+ import numpy as np
30
+ import six
31
+ from six.moves import range
32
+
33
+
34
class Score(collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
  """Immutable triple holding precision, recall, and f-measure values."""
37
+
38
+
39
class BaseScorer(abc.ABC):
  """Abstract interface implemented by all scorer objects."""

  @abc.abstractmethod
  def score(self, target: str, prediction: str) -> Dict[str, Score]:
    """Calculates score between the target and prediction.

    Args:
      target: Text containing the target (ground truth) text.
      prediction: Text containing the predicted text.

    Returns:
      A dict mapping each score_type (string) to Score object.
    """
53
+
54
+
55
class AggregateScore(collections.namedtuple("AggregateScore", ["low", "mid", "high"])):
  """Confidence-interval triple (low, mid, high) for an aggregated score."""
58
+
59
+
60
class BootstrapAggregator(object):
  """Aggregates per-example scores into bootstrap confidence intervals.

  Feed per-example score dicts with add_scores(), then call aggregate() to
  obtain, for every score type, an AggregateScore whose (low, mid, high)
  bounds are computed by bootstrap resampling, as in the original ROUGE perl
  implementation.

  Sample usage:
    scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'])
    aggregator = BootstrapAggregator()
    aggregator.add_scores(scorer.score("one two three", "one two"))
    aggregator.add_scores(scorer.score("one two five six", "seven eight"))
    result = aggregator.aggregate()
  """

  def __init__(self, confidence_interval=0.95, n_samples=1000):
    """Initializes a BootstrapAggregator object.

    Args:
      confidence_interval: Confidence interval to compute on the mean, as a
        decimal in [0, 1].
      n_samples: Number of bootstrap samples to draw.

    Raises:
      ValueError: If an invalid argument is given.
    """
    if confidence_interval < 0 or confidence_interval > 1:
      raise ValueError("confidence_interval must be in range [0, 1]")
    if n_samples <= 0:
      raise ValueError("n_samples must be positive")

    self._n_samples = n_samples
    self._confidence_interval = confidence_interval
    # Maps score_type -> list of per-example score tuples.
    self._scores = collections.defaultdict(list)

  def add_scores(self, scores):
    """Adds one example's scores for future aggregation.

    Args:
      scores: Dict mapping score_type strings to a namedtuple object/class
        representing a score.
    """
    for score_type, score in scores.items():
      self._scores[score_type].append(score)

  def aggregate(self):
    """Aggregates scores previously added using add_scores.

    Returns:
      A dict mapping score_type to AggregateScore objects.
    """
    result = {}
    for score_type, scores in self._scores.items():
      # Stack scores into a 2-d matrix of (sample, measure).
      score_matrix = np.vstack(tuple(scores))
      # Percentiles come back as (interval, measure); rows are low/mid/high.
      percentiles = self._bootstrap_resample(score_matrix)
      score_cls = scores[0].__class__
      low, mid, high = (score_cls(*percentiles[j, :]) for j in range(3))
      result[score_type] = AggregateScore(low=low, mid=mid, high=high)
    return result

  def _bootstrap_resample(self, matrix):
    """Performs bootstrap resampling on a matrix of scores.

    Args:
      matrix: A 2-d matrix of (sample, measure).

    Returns:
      A 2-d matrix of (bounds, measure). There are three bounds: low (row 0),
      mid (row 1) and high (row 2). Mid is always the mean, while low and high
      bounds come from self._confidence_interval (default 0.95 gives the
      2.5th and 97.5th percentiles of the bootstrapped means).
    """
    num_rows = matrix.shape[0]
    # Matrix of (bootstrap sample, measure): each row is the mean of one
    # resample drawn with replacement.
    sample_mean = np.zeros((self._n_samples, matrix.shape[1]))
    for i in range(self._n_samples):
      chosen = np.random.choice(np.arange(num_rows), size=num_rows)
      sample_mean[i, :] = np.mean(matrix[chosen, :], axis=0)

    # Take percentiles on the estimate of the mean using bootstrap samples.
    percentile_delta = (1 - self._confidence_interval) / 2
    q = 100 * np.array([percentile_delta, 0.5, 1 - percentile_delta])
    return np.percentile(sample_mean, q, axis=0)
159
+
160
+
161
def fmeasure(precision, recall):
  """Computes the f-measure (harmonic mean) of precision and recall.

  Returns 0.0 when both inputs are zero to avoid division by zero.
  """
  denominator = precision + recall
  if denominator > 0:
    return 2 * precision * recall / denominator
  return 0.0
llmeval-env/lib/python3.10/site-packages/rouge_score/scoring_test.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Tests for rouge scoring and aggregation.
16
+
17
+ Checks for both correctness, and for consistency with values from the perl ROUGE
18
+ implementation which this package replicates.
19
+ """
20
+
21
+ from __future__ import absolute_import
22
+ from __future__ import division
23
+ from __future__ import print_function
24
+
25
+ import os
26
+
27
+ from absl.testing import absltest
28
+ import numpy as np
29
+ from six.moves import range
30
+ from six.moves import zip
31
+ from rouge_score import rouge_scorer
32
+ from rouge_score import scoring
33
+ from rouge_score import test_util
34
+
35
+ # Delta for matching against ground truth rouge values. Must be relatively
36
+ # high compared to the individual rouge tests since bootstrap sampling
37
+ # introduces randomness.
38
+ _DELTA = 0.002
39
+
40
+ # Use a fixed random seed, or tests may fail with nonzero probability.
41
+ _RANDOM_SEED = 123
42
+
43
+
44
class BootstrapAggregatorTest(absltest.TestCase):
  """Tests BootstrapAggregator against hand-computed and perl ROUGE values."""

  def setUp(self):
    super(BootstrapAggregatorTest, self).setUp()
    # Fixed seed keeps the bootstrap resampling deterministic across runs.
    np.random.seed(_RANDOM_SEED)
    with open(test_util.LARGE_TARGETS_FILE) as f:
      self.targets = f.readlines()
    with open(test_util.LARGE_PREDICTIONS_FILE) as f:
      self.predictions = f.readlines()

  def _aggregate(self, scorer, targets, predictions):
    """Scores every (target, prediction) pair and aggregates the results."""
    aggregator = scoring.BootstrapAggregator()
    for target, prediction in zip(targets, predictions):
      aggregator.add_scores(scorer.score(target, prediction))
    return aggregator.aggregate()

  def assertSimilarAggregates(self, precision, recall, fmeasure, aggregate,
                              delta=_DELTA):
    """Helper method for asserting matching aggregate scores.

    Args:
      precision: Tuple of (low, mid, high) precision scores.
      recall: Tuple of (low, mid, high) recall scores.
      fmeasure: Tuple of (low, mid, high) fmeasure scores.
      aggregate: An AggregateScore object.
      delta: Tolerance delta for matching values.
    """
    bounds = (aggregate.low, aggregate.mid, aggregate.high)
    for attr, expected in (("precision", precision),
                           ("recall", recall),
                           ("fmeasure", fmeasure)):
      for want, bound in zip(expected, bounds):
        self.assertAlmostEqual(want, getattr(bound, attr), delta=delta)

  def testConsistentPercentiles(self):
    aggregator = scoring.BootstrapAggregator(confidence_interval=0.9)
    for score in (scoring.Score(precision=1, recall=1 / 3, fmeasure=1 / 2),
                  scoring.Score(precision=0, recall=0, fmeasure=0),
                  scoring.Score(precision=1, recall=1, fmeasure=1)):
      aggregator.add_scores({"rouge1": score})
    result = aggregator.aggregate()

    self.assertSimilarAggregates((1 / 3, 2 / 3, 3 / 3),
                                 (1 / 9, 4 / 9, 7 / 9),
                                 (1 / 6, 3 / 6, 5 / 6),
                                 result["rouge1"], delta=1e-8)

  def testLargeConfidence(self):
    # A 0-width interval collapses low/mid/high onto the mean.
    aggregator = scoring.BootstrapAggregator(confidence_interval=0.0)
    for score in (scoring.Score(precision=1, recall=1 / 3, fmeasure=1 / 2),
                  scoring.Score(precision=0, recall=0, fmeasure=0),
                  scoring.Score(precision=1, recall=1, fmeasure=1)):
      aggregator.add_scores({"rouge1": score})
    result = aggregator.aggregate()

    self.assertSimilarAggregates((2 / 3, 2 / 3, 2 / 3),
                                 (4 / 9, 4 / 9, 4 / 9),
                                 (3 / 6, 3 / 6, 3 / 6),
                                 result["rouge1"], delta=1e-8)

  def testMultipleRougeTypes(self):
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=False)
    result = self._aggregate(scorer, self.targets[:5], self.predictions[:5])

    self.assertSameElements(list(result.keys()), ["rouge1", "rougeL"])

  def testConfidenceIntervalsAgainstRouge155(self):
    scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=False)
    result = self._aggregate(scorer, self.targets, self.predictions)

    self.assertSimilarAggregates((0.48695, 0.49879, 0.51131),
                                 (0.31106, 0.31950, 0.32849),
                                 (0.37614, 0.38554, 0.39581),
                                 result["rouge1"])

  def testConfidenceIntervalsAgainstRouge155WithStemming(self):
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
    result = self._aggregate(scorer, self.targets, self.predictions)

    self.assertSimilarAggregates((0.51027, 0.52434, 0.53788),
                                 (0.32563, 0.33580, 0.34548),
                                 (0.39380, 0.40524, 0.41661),
                                 result["rouge1"])
    self.assertSimilarAggregates((0.50759, 0.52104, 0.53382),  # P
                                 (0.32418, 0.33377, 0.34362),  # R
                                 (0.39157, 0.40275, 0.41383),  # F
                                 result["rougeL"])

  def testConfidenceIntervalsAgainstRouge155WithStemmingMultiLine(self):
    scorer = rouge_scorer.RougeScorer(
        ["rouge1", "rouge2", "rougeLsum"], use_stemmer=True)
    targets = [
        test_util.get_text(
            os.path.join(test_util.PYROUGE_DIR, 'target_multi.%d.txt' % i))
        for i in range(0, 250)
    ]
    predictions = [
        test_util.get_text(
            os.path.join(test_util.PYROUGE_DIR, 'prediction_multi.%d.txt' % i))
        for i in range(0, 250)
    ]
    assert len(targets) == len(predictions)
    assert len(targets) == 250
    result = self._aggregate(scorer, targets, predictions)

    # Reference values produced by:
    # DIR = testdata/pyrouge_evaluate_plain_text_files
    # pyrouge_evaluate_plain_text_files -s $DIR -sfp "prediction_multi.(.*).txt"
    # -m $DIR -mfp target_multi.#ID#.txt
    self.assertSimilarAggregates((0.58963, 0.59877, 0.60822),  # P
                                 (0.37327, 0.38091, 0.38914),  # R
                                 (0.45607, 0.46411, 0.47244),  # F
                                 result["rouge1"])
    self.assertSimilarAggregates((0.35429, 0.36516, 0.37665),  # P
                                 (0.22341, 0.23109, 0.23916),  # R
                                 (0.27312, 0.28209, 0.29133),  # F
                                 result["rouge2"])
    self.assertSimilarAggregates((0.58604, 0.59491, 0.60444),  # P
                                 (0.37084, 0.37846, 0.38671),  # R
                                 (0.45305, 0.46113, 0.46946),  # F
                                 result["rougeLsum"])
179
+
180
+
181
# Allow running this test module directly as a script.
if __name__ == "__main__":
  absltest.main()
llmeval-env/lib/python3.10/site-packages/rouge_score/test_util.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Test utils for ROUGE."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import os
22
+
23
# Directory holding all test fixture data, relative to this module.
_TESTDATA_PREFIX = os.path.join(os.path.dirname(__file__), "testdata")

# Small target/prediction text pair.
TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target.txt")

PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction.txt")

# Larger target/prediction pair, used by aggregation/bootstrap tests.
LARGE_TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target_large.txt")

LARGE_PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction_large.txt")

# Fixture containing delimited text; see its consumers for the exact format.
DELIMITED_FILE = os.path.join(_TESTDATA_PREFIX, "delimited.txt")

# Directory of files used to compare against pyrouge / perl ROUGE output.
PYROUGE_DIR = os.path.join(_TESTDATA_PREFIX, "pyrouge_files")
36
+
37
+
38
def get_text(fname):
  """Returns the entire contents of the text file at `fname`."""
  with open(fname) as infile:
    contents = infile.read()
  return contents
llmeval-env/lib/python3.10/site-packages/rouge_score/tokenize.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """A library for tokenizing text."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import re
22
+ import six
23
+
24
+
25
# Pre-compile regexes that are used often.
# One or more characters outside [a-z0-9]; these runs get replaced by spaces.
NON_ALPHANUM_PATTERN = r"[^a-z0-9]+"
NON_ALPHANUM_RE = re.compile(NON_ALPHANUM_PATTERN)
# Whitespace runs, used to split the normalized text into tokens.
SPACES_PATTERN = r"\s+"
SPACES_RE = re.compile(SPACES_PATTERN)
# A valid token is non-empty and entirely lowercase alphanumeric.
VALID_TOKEN_PATTERN = r"^[a-z0-9]+$"
VALID_TOKEN_RE = re.compile(VALID_TOKEN_PATTERN)
32
+
33
+
34
def tokenize(text, stemmer):
  """Tokenize input text into a list of tokens.

  This approach aims to replicate the approach taken by Chin-Yew Lin in
  the original ROUGE implementation.

  Args:
    text: A text blob to tokenize.
    stemmer: An optional stemmer.

  Returns:
    A list of string tokens extracted from input text.
  """
  # Lowercase, then collapse every non-alphanumeric run into a single space.
  normalized = NON_ALPHANUM_RE.sub(" ", six.ensure_str(text.lower()))
  tokens = SPACES_RE.split(normalized)

  if stemmer:
    # Words of at most 3 characters are kept as-is; longer ones are stemmed.
    tokens = [
        token if len(token) <= 3 else six.ensure_str(stemmer.stem(token))
        for token in tokens
    ]

  # One final pass to drop any empty or invalid tokens.
  return list(filter(VALID_TOKEN_RE.match, tokens))
llmeval-env/lib/python3.10/site-packages/rouge_score/tokenize_test.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Tests for tokenize."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ from absl.testing import absltest
22
+ from rouge_score import tokenize
23
+
24
+
25
class TokenizeTest(absltest.TestCase):

  def test_give_me_a_name(self):
    """Case and whitespace variations produce the same lowercase tokens."""
    expected = ['one', 'two', 'three']
    for text in ('one Two three', 'one\n Two \nthree'):
      self.assertEqual(expected, tokenize.tokenize(text, None))
32
+
33
+
34
# Allow running this test module directly as a script.
if __name__ == '__main__':
  absltest.main()
llmeval-env/lib/python3.10/site-packages/rouge_score/tokenizers.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The rouge_score Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Library containing Tokenizer definitions.
16
+
17
+ The RougeScorer class can be instantiated with the tokenizers defined here. New
18
+ tokenizers can be defined by creating a subclass of the Tokenizer abstract class
19
+ and overriding the tokenize() method.
20
+ """
21
+ import abc
22
+ from nltk.stem import porter
23
+ from rouge_score import tokenize
24
+
25
+
26
class Tokenizer(abc.ABC):
  """Abstract base class for a tokenizer.

  Subclasses of Tokenizer must implement the tokenize() method.
  """

  @abc.abstractmethod
  def tokenize(self, text):
    """Splits `text` into a list of string tokens; must be overridden."""
    raise NotImplementedError("Tokenizer must override tokenize() method")
35
+
36
+
37
class DefaultTokenizer(Tokenizer):
  """Tokenizer used by default, with optional Porter stemming."""

  def __init__(self, use_stemmer=False):
    """Constructor for DefaultTokenizer.

    Args:
      use_stemmer: boolean, indicating whether Porter stemmer should be used
        to strip word suffixes to improve matching.
    """
    if use_stemmer:
      self._stemmer = porter.PorterStemmer()
    else:
      self._stemmer = None

  def tokenize(self, text):
    """Delegates to tokenize.tokenize() with the configured stemmer."""
    return tokenize.tokenize(text, self._stemmer)
llmeval-env/lib/python3.10/site-packages/urllib3/__init__.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ # Set default logging handler to avoid "No handler found" warnings.
8
+ import logging
9
+ import sys
10
+ import typing
11
+ import warnings
12
+ from logging import NullHandler
13
+
14
+ from . import exceptions
15
+ from ._base_connection import _TYPE_BODY
16
+ from ._collections import HTTPHeaderDict
17
+ from ._version import __version__
18
+ from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
19
+ from .filepost import _TYPE_FIELDS, encode_multipart_formdata
20
+ from .poolmanager import PoolManager, ProxyManager, proxy_from_url
21
+ from .response import BaseHTTPResponse, HTTPResponse
22
+ from .util.request import make_headers
23
+ from .util.retry import Retry
24
+ from .util.timeout import Timeout
25
+
26
# Ensure that Python is compiled with OpenSSL 1.1.1+
# If the 'ssl' module isn't available at all that's
# fine, we only care if the module is available.
try:
    import ssl
except ImportError:
    pass
else:
    # Version strings not starting with "OpenSSL " (i.e. a different TLS
    # library) only trigger a warning; a too-old OpenSSL aborts the import.
    if not ssl.OPENSSL_VERSION.startswith("OpenSSL "):  # Defensive:
        warnings.warn(
            "urllib3 v2 only supports OpenSSL 1.1.1+, currently "
            f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
            "See: https://github.com/urllib3/urllib3/issues/3020",
            exceptions.NotOpenSSLWarning,
        )
    elif ssl.OPENSSL_VERSION_INFO < (1, 1, 1):  # Defensive:
        raise ImportError(
            "urllib3 v2 only supports OpenSSL 1.1.1+, currently "
            f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
            "See: https://github.com/urllib3/urllib3/issues/2168"
        )
47
+
48
__author__ = "Andrey Petrov ([email protected])"
__license__ = "MIT"
# Re-export the version string imported from ._version above.
__version__ = __version__

# Public API of the top-level urllib3 package.
__all__ = (
    "HTTPConnectionPool",
    "HTTPHeaderDict",
    "HTTPSConnectionPool",
    "PoolManager",
    "ProxyManager",
    "HTTPResponse",
    "Retry",
    "Timeout",
    "add_stderr_logger",
    "connection_from_url",
    "disable_warnings",
    "encode_multipart_formdata",
    "make_headers",
    "proxy_from_url",
    "request",
    "BaseHTTPResponse",
)

# Attach a NullHandler so applications without logging configured don't get
# "No handler found" warnings from the library's loggers.
logging.getLogger(__name__).addHandler(NullHandler())
72
+
73
+
74
def add_stderr_logger(
    level: int = logging.DEBUG,
) -> logging.StreamHandler[typing.TextIO]:
    """
    Attach a :class:`logging.StreamHandler` to urllib3's logger, which is
    useful for quickly debugging requests.

    Returns the handler after adding it.
    """
    # This function needs to live in this __init__.py so that __name__ is
    # correct even if urllib3 is vendored within another package.
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    )
    logger = logging.getLogger(__name__)
    logger.addHandler(stderr_handler)
    logger.setLevel(level)
    logger.debug("Added a stderr logging handler to logger: %s", __name__)
    return stderr_handler
92
+
93
+
94
# ... Clean up: NullHandler was only needed for the addHandler call above.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
105
+
106
+
107
def disable_warnings(category: type[Warning] = exceptions.HTTPWarning) -> None:
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: Warning class to ignore. Defaults to the root
        ``HTTPWarning`` so that every urllib3 warning is silenced.
    """
    warnings.simplefilter("ignore", category)
112
+
113
+
114
# Shared module-global pool backing the top-level request() helper below.
_DEFAULT_POOL = PoolManager()
115
+
116
+
117
def request(
    method: str,
    url: str,
    *,
    body: _TYPE_BODY | None = None,
    fields: _TYPE_FIELDS | None = None,
    headers: typing.Mapping[str, str] | None = None,
    preload_content: bool | None = True,
    decode_content: bool | None = True,
    redirect: bool | None = True,
    retries: Retry | bool | int | None = None,
    timeout: Timeout | float | int | None = 3,
    json: typing.Any | None = None,
) -> BaseHTTPResponse:
    """
    A convenience, top-level request method. It uses the module-global
    ``PoolManager`` instance, so its side effects (such as cached connections)
    are shared across every caller that relies on it; create your own
    ``PoolManager`` to avoid that. The low-level ``**urlopen_kw`` keyword
    arguments are not accepted here.

    :param method:
        HTTP request method (such as GET, POST, PUT, etc.)

    :param url:
        The URL to perform the request on.

    :param body:
        Data to send in the request body, either :class:`str`, :class:`bytes`,
        an iterable of :class:`str`/:class:`bytes`, or a file-like object.

    :param fields:
        Data to encode and send in the request body.

    :param headers:
        Dictionary of custom headers to send, such as User-Agent,
        If-None-Match, etc.

    :param bool preload_content:
        If True, the response's body will be preloaded into memory.

    :param bool decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.

    :param redirect:
        If True, automatically handle redirects (status codes 301, 302,
        303, 307, 308). Each redirect counts as a retry. Disabling retries
        will disable redirect, too.

    :param retries:
        Configure the number of retries to allow before raising a
        :class:`~urllib3.exceptions.MaxRetryError` exception.

        If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a
        :class:`~urllib3.util.retry.Retry` object for fine-grained control
        over different types of retries.
        Pass an integer number to retry connection errors that many times,
        but no other types of errors. Pass zero to never retry.

        If ``False``, then retries are disabled and any exception is raised
        immediately. Also, instead of raising a MaxRetryError on redirects,
        the redirect response will be returned.

    :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

    :param timeout:
        If specified, overrides the default timeout for this one
        request. It may be a float (in seconds) or an instance of
        :class:`urllib3.util.Timeout`.

    :param json:
        Data to encode and send as JSON with UTF-encoded in the request body.
        The ``"Content-Type"`` header will be set to ``"application/json"``
        unless specified otherwise.
    """
    # Gather keyword options once, then delegate to the shared pool.
    options: dict[str, typing.Any] = {
        "body": body,
        "fields": fields,
        "headers": headers,
        "preload_content": preload_content,
        "decode_content": decode_content,
        "redirect": redirect,
        "retries": retries,
        "timeout": timeout,
        "json": json,
    }
    return _DEFAULT_POOL.request(method, url, **options)
206
+
207
+
208
# On Emscripten (e.g. in-browser Python), install the platform-specific
# backend from urllib3's contrib package at import time.
if sys.platform == "emscripten":
    from .contrib.emscripten import inject_into_urllib3  # noqa: 401

    inject_into_urllib3()
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.12 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_base_connection.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_request_methods.cpython-310.pyc ADDED
Binary file (9.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/_version.cpython-310.pyc ADDED
Binary file (252 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/connectionpool.cpython-310.pyc ADDED
Binary file (29.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/fields.cpython-310.pyc ADDED
Binary file (9.73 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/__pycache__/response.cpython-310.pyc ADDED
Binary file (32.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/_collections.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import typing
4
+ from collections import OrderedDict
5
+ from enum import Enum, auto
6
+ from threading import RLock
7
+
8
if typing.TYPE_CHECKING:
    # We can only import Protocol if TYPE_CHECKING because it's a development
    # dependency, and is not available at runtime.
    from typing import Protocol

    from typing_extensions import Self

    class HasGettableStringKeys(Protocol):
        """Structural type: any object with str keys() and str-valued
        __getitem__ that can seed an HTTPHeaderDict."""

        def keys(self) -> typing.Iterator[str]:
            ...

        def __getitem__(self, key: str) -> str:
            ...
22
+
23
__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]


# Key type
_KT = typing.TypeVar("_KT")
# Value type
_VT = typing.TypeVar("_VT")
# Default type
_DT = typing.TypeVar("_DT")

# Everything accepted as a source of headers by HTTPHeaderDict.
ValidHTTPHeaderSource = typing.Union[
    "HTTPHeaderDict",
    typing.Mapping[str, str],
    typing.Iterable[typing.Tuple[str, str]],
    "HasGettableStringKeys",
]
39
+
40
+
41
class _Sentinel(Enum):
    # Sentinel marking "argument was not passed" (distinct from an explicit
    # None).
    not_passed = auto()
43
+
44
+
45
def ensure_can_construct_http_header_dict(
    potential: object,
) -> ValidHTTPHeaderSource | None:
    """
    Normalize *potential* into a shape HTTPHeaderDict can be constructed
    from, or return None when no usable shape is recognized.
    """
    if isinstance(potential, HTTPHeaderDict):
        return potential
    if isinstance(potential, typing.Mapping):
        # Full runtime checking of a Mapping's contents is expensive, so for
        # typechecking purposes we assume any Mapping has the right shape.
        return typing.cast(typing.Mapping[str, str], potential)
    if isinstance(potential, typing.Iterable):
        # Same reasoning as for Mapping: trust the element types.
        return typing.cast(typing.Iterable[typing.Tuple[str, str]], potential)
    if hasattr(potential, "keys") and hasattr(potential, "__getitem__"):
        # Duck-typed source with string keys and string values.
        return typing.cast("HasGettableStringKeys", potential)
    return None
63
+
64
+
65
class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called on the evicted value.
    """

    _container: typing.OrderedDict[_KT, _VT]
    _maxsize: int
    dispose_func: typing.Callable[[_VT], None] | None
    lock: RLock

    def __init__(
        self,
        maxsize: int = 10,
        dispose_func: typing.Callable[[_VT], None] | None = None,
    ) -> None:
        super().__init__()
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = OrderedDict()
        self.lock = RLock()

    def __getitem__(self, key: _KT) -> _VT:
        """Return the value for ``key``, marking it most recently used."""
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key: _KT, value: _VT) -> None:
        """Insert or overwrite ``key``; evict the LRU entry if over capacity.

        The dispose callback runs only after the lock has been released.
        """
        evicted_item = None
        with self.lock:
            # Possibly evict the existing value of 'key'
            try:
                # If the key exists, we'll overwrite it, which won't change the
                # size of the pool. Because accessing a key should move it to
                # the end of the eviction line, we pop it out first.
                evicted_item = key, self._container.pop(key)
                self._container[key] = value
            except KeyError:
                # When the key does not exist, we insert the value first so that
                # evicting works in all cases, including when self._maxsize is 0
                self._container[key] = value
                if len(self._container) > self._maxsize:
                    # If we didn't evict an existing value, and we've hit our maximum
                    # size, then we have to evict the least recently used item from
                    # the beginning of the container.
                    evicted_item = self._container.popitem(last=False)

        # After releasing the lock on the pool, dispose of any evicted value.
        if evicted_item is not None and self.dispose_func:
            _, evicted_value = evicted_item
            self.dispose_func(evicted_value)

    def __delitem__(self, key: _KT) -> None:
        """Remove ``key``; dispose of its value after releasing the lock."""
        with self.lock:
            value = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self) -> int:
        """Return the number of currently stored items."""
        with self.lock:
            return len(self._container)

    def __iter__(self) -> typing.NoReturn:
        raise NotImplementedError(
            "Iteration over this class is unlikely to be threadsafe."
        )

    def clear(self) -> None:
        """Remove all items; dispose of each value after releasing the lock."""
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(self._container.values())
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self) -> set[_KT]:  # type: ignore[override]
        """Return a snapshot of the current keys as a set."""
        with self.lock:
            return set(self._container.keys())
156
+
157
+
158
class HTTPHeaderDictItemView(typing.Set[typing.Tuple[str, str]]):
    """
    Items view over an :class:`HTTPHeaderDict`.

    An HTTPHeaderDict addresses headers two ways. Direct lookup by name
    returns all values joined into one string:

    >>> d['X-Header-Name']
    'Value1, Value2, Value3'

    Iterating its items instead yields one ``(name, value)`` pair per stored
    value — values are only combined when they were added with
    ``combine=True``:

    >>> d = HTTPHeaderDict({"A": "1", "B": "foo"})
    >>> d.add("A", "2", combine=True)
    >>> d.add("B", "bar")
    >>> list(d.items())
    [
        ('A', '1, 2'),
        ('B', 'foo'),
        ('B', 'bar'),
    ]

    This class satisfies the MutableMapping ABC's requirements while giving
    us that nonstandard iteration behavior: duplicate keys, ordered by time
    of first insertion.
    """

    _headers: HTTPHeaderDict

    def __init__(self, headers: HTTPHeaderDict) -> None:
        self._headers = headers

    def __len__(self) -> int:
        # Count the per-value pairs without building an intermediate list.
        return sum(1 for _ in self._headers.iteritems())

    def __iter__(self) -> typing.Iterator[tuple[str, str]]:
        return self._headers.iteritems()

    def __contains__(self, item: object) -> bool:
        # Only (str, str) 2-tuples can possibly be members.
        if not (isinstance(item, tuple) and len(item) == 2):
            return False
        candidate_key, candidate_value = item
        if not (isinstance(candidate_key, str) and isinstance(candidate_value, str)):
            return False
        return self._headers._has_value_for_header(candidate_key, candidate_value)
205
+
206
class HTTPHeaderDict(typing.MutableMapping[str, str]):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.

    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.

    A ``dict`` like container for storing HTTP Headers.

    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.

    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.

    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.

    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """

    # Maps a lowercased field name to a list whose first element is the
    # original-cased field name and whose remaining elements are the values.
    _container: typing.MutableMapping[str, list[str]]

    def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):
        super().__init__()
        self._container = {}  # 'dict' is insert-ordered
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)

    def __setitem__(self, key: str, val: str) -> None:
        """Replace any existing values for ``key`` with the single value ``val``."""
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        self._container[key.lower()] = [key, val]

    def __getitem__(self, key: str) -> str:
        """Return every value stored for ``key`` (case-insensitive), joined by ', '."""
        val = self._container[key.lower()]
        # val[0] is the original-cased field name; values start at index 1.
        return ", ".join(val[1:])

    def __delitem__(self, key: str) -> None:
        del self._container[key.lower()]

    def __contains__(self, key: object) -> bool:
        # Non-string keys can never be present.
        if isinstance(key, str):
            return key.lower() in self._container
        return False

    def setdefault(self, key: str, default: str = "") -> str:
        # Same as MutableMapping.setdefault, but with an empty-string default.
        return super().setdefault(key, default)

    def __eq__(self, other: object) -> bool:
        """Equal to anything constructable into a header dict, comparing
        case-insensitively with duplicate values merged."""
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return False
        else:
            other_as_http_header_dict = type(self)(maybe_constructable)

        return {k.lower(): v for k, v in self.itermerged()} == {
            k.lower(): v for k, v in other_as_http_header_dict.itermerged()
        }

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __len__(self) -> int:
        # Number of distinct (case-insensitive) field names, not header lines.
        return len(self._container)

    def __iter__(self) -> typing.Iterator[str]:
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]

    def discard(self, key: str) -> None:
        """Remove ``key`` if present; unlike ``del``, a missing key is a no-op."""
        try:
            del self[key]
        except KeyError:
            pass

    def add(self, key: str, val: str, *, combine: bool = False) -> None:
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.

        If this is called with combine=True, instead of adding a new header value
        as a distinct item during iteration, this will instead append the value to
        any existing header value with a comma. If no existing header value exists
        for the key, then the value will simply be added, ignoring the combine parameter.

        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        >>> list(headers.items())
        [('foo', 'bar'), ('foo', 'baz')]
        >>> headers.add('foo', 'quz', combine=True)
        >>> list(headers.items())
        [('foo', 'bar, baz, quz')]
        """
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        key_lower = key.lower()
        new_vals = [key, val]
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        # If setdefault returned a different list object, the key already
        # existed and we must append/combine rather than insert.
        if new_vals is not vals:
            # if there are values here, then there is at least the initial
            # key/value pair
            assert len(vals) >= 2
            if combine:
                vals[-1] = vals[-1] + ", " + val
            else:
                vals.append(val)

    def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError(
                f"extend() takes at most 1 positional arguments ({len(args)} given)"
            )
        other = args[0] if len(args) >= 1 else ()

        if isinstance(other, HTTPHeaderDict):
            # Preserve per-line duplicates rather than the merged view.
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, typing.Mapping):
            for key, val in other.items():
                self.add(key, val)
        elif isinstance(other, typing.Iterable):
            other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)
            for key, value in other:
                self.add(key, value)
        elif hasattr(other, "keys") and hasattr(other, "__getitem__"):
            # THIS IS NOT A TYPESAFE BRANCH
            # In this branch, the object has a `keys` attr but is not a Mapping or any of
            # the other types indicated in the method signature. We do some stuff with
            # it as though it partially implements the Mapping interface, but we're not
            # doing that stuff safely AT ALL.
            for key in other.keys():
                self.add(key, other[key])

        for key, value in kwargs.items():
            self.add(key, value)

    @typing.overload
    def getlist(self, key: str) -> list[str]:
        ...

    @typing.overload
    def getlist(self, key: str, default: _DT) -> list[str] | _DT:
        ...

    def getlist(
        self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed
    ) -> list[str] | _DT:
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            if default is _Sentinel.not_passed:
                # _DT is unbound; empty list is instance of List[str]
                return []
            # _DT is bound; default is instance of _DT
            return default
        else:
            # _DT may or may not be bound; vals[1:] is instance of List[str], which
            # meets our external interface requirement of `Union[List[str], _DT]`.
            return vals[1:]

    def _prepare_for_method_change(self) -> Self:
        """
        Remove content-specific header fields before changing the request
        method to GET or HEAD according to RFC 9110, Section 15.4.
        """
        content_specific_headers = [
            "Content-Encoding",
            "Content-Language",
            "Content-Location",
            "Content-Type",
            "Content-Length",
            "Digest",
            "Last-Modified",
        ]
        for header in content_specific_headers:
            self.discard(header)
        return self

    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist

    # Backwards compatibility for http.cookiejar
    get_all = getlist

    def __repr__(self) -> str:
        return f"{type(self).__name__}({dict(self.itermerged())})"

    def _copy_from(self, other: HTTPHeaderDict) -> None:
        # Bulk-copy another HTTPHeaderDict's storage, preserving value lists.
        for key in other:
            val = other.getlist(key)
            self._container[key.lower()] = [key, *val]

    def copy(self) -> HTTPHeaderDict:
        """Return a shallow copy of this header dict."""
        clone = type(self)()
        clone._copy_from(self)
        return clone

    def iteritems(self) -> typing.Iterator[tuple[str, str]]:
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val

    def itermerged(self) -> typing.Iterator[tuple[str, str]]:
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ", ".join(val[1:])

    def items(self) -> HTTPHeaderDictItemView:  # type: ignore[override]
        return HTTPHeaderDictItemView(self)

    def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
        # True only if `potential_value` is one of the individually stored
        # values for `header_name` (not the merged representation).
        if header_name in self:
            return potential_value in self._container[header_name.lower()][1:]
        return False

    def __ior__(self, other: object) -> HTTPHeaderDict:
        # Supports extending a header dict in-place using operator |=
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        self.extend(maybe_constructable)
        return self

    def __or__(self, other: object) -> HTTPHeaderDict:
        # Supports merging header dicts using operator |
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        result = self.copy()
        result.extend(maybe_constructable)
        return result

    def __ror__(self, other: object) -> HTTPHeaderDict:
        # Supports merging header dicts using operator | when other is on left side
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        result = type(self)(maybe_constructable)
        result.extend(self)
        return result
llmeval-env/lib/python3.10/site-packages/urllib3/_request_methods.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json as _json
4
+ import typing
5
+ from urllib.parse import urlencode
6
+
7
+ from ._base_connection import _TYPE_BODY
8
+ from ._collections import HTTPHeaderDict
9
+ from .filepost import _TYPE_FIELDS, encode_multipart_formdata
10
+ from .response import BaseHTTPResponse
11
+
12
# Only RequestMethods is part of this module's public API.
__all__ = ["RequestMethods"]

# Shape of `fields` accepted for URL-encoding: either a sequence of
# (name, value) pairs or a mapping; values may be str or bytes.
_TYPE_ENCODE_URL_FIELDS = typing.Union[
    typing.Sequence[typing.Tuple[str, typing.Union[str, bytes]]],
    typing.Mapping[str, typing.Union[str, bytes]],
]
18
+
19
+
20
class RequestMethods:
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`urllib3.HTTPConnectionPool` and
    :class:`urllib3.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose `fields` are encoded into the query string; all other
    # methods get their fields encoded into the request body.
    _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}

    def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:
        self.headers = headers or {}

    def urlopen(
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        encode_multipart: bool = True,
        multipart_boundary: str | None = None,
        **kw: typing.Any,
    ) -> BaseHTTPResponse:  # Abstract
        # Subclasses (connection pools, pool managers) supply the transport.
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method."
        )

    def request(
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        fields: _TYPE_FIELDS | None = None,
        headers: typing.Mapping[str, str] | None = None,
        json: typing.Any | None = None,
        **urlopen_kw: typing.Any,
    ) -> BaseHTTPResponse:
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param fields:
            Data to encode and send in the request body. Values are processed
            by :func:`urllib.parse.urlencode`.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param json:
            Data to encode and send as JSON with UTF-encoded in the request body.
            The ``"Content-Type"`` header will be set to ``"application/json"``
            unless specified otherwise.

        :raises TypeError: if both ``body`` and ``json`` are given.
        """
        method = method.upper()

        if json is not None and body is not None:
            raise TypeError(
                "request got values for both 'body' and 'json' parameters which are mutually exclusive"
            )

        if json is not None:
            if headers is None:
                headers = self.headers

            # Case-insensitive check; copy into an HTTPHeaderDict before
            # mutating so neither self.headers nor the caller's mapping is
            # modified in place.
            if not ("content-type" in map(str.lower, headers.keys())):
                headers = HTTPHeaderDict(headers)
                headers["Content-Type"] = "application/json"

            body = _json.dumps(json, separators=(",", ":"), ensure_ascii=False).encode(
                "utf-8"
            )

        if body is not None:
            urlopen_kw["body"] = body

        if method in self._encode_url_methods:
            return self.request_encode_url(
                method,
                url,
                fields=fields,  # type: ignore[arg-type]
                headers=headers,
                **urlopen_kw,
            )
        else:
            return self.request_encode_body(
                method, url, fields=fields, headers=headers, **urlopen_kw
            )

    def request_encode_url(
        self,
        method: str,
        url: str,
        fields: _TYPE_ENCODE_URL_FIELDS | None = None,
        headers: typing.Mapping[str, str] | None = None,
        **urlopen_kw: str,
    ) -> BaseHTTPResponse:
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param fields:
            Data to encode and send in the request body.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.
        """
        if headers is None:
            headers = self.headers

        extra_kw: dict[str, typing.Any] = {"headers": headers}
        extra_kw.update(urlopen_kw)

        if fields:
            # Append the encoded fields as the query string.
            url += "?" + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(
        self,
        method: str,
        url: str,
        fields: _TYPE_FIELDS | None = None,
        headers: typing.Mapping[str, str] | None = None,
        encode_multipart: bool = True,
        multipart_boundary: str | None = None,
        **urlopen_kw: str,
    ) -> BaseHTTPResponse:
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :func:`urllib3.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :func:`urllib.parse.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param fields:
            Data to encode and send in the request body.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param encode_multipart:
            If True, encode the ``fields`` using the multipart/form-data MIME
            format.

        :param multipart_boundary:
            If not specified, then a random boundary will be generated using
            :func:`urllib3.filepost.choose_boundary`.

        :raises TypeError: if both ``fields`` and a ``body`` kwarg are given.
        """
        if headers is None:
            headers = self.headers

        # Copy into an HTTPHeaderDict so the case-insensitive setdefault
        # below works and the caller's mapping is never mutated.
        extra_kw: dict[str, typing.Any] = {"headers": HTTPHeaderDict(headers)}
        body: bytes | str

        if fields:
            if "body" in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one."
                )

            if encode_multipart:
                body, content_type = encode_multipart_formdata(
                    fields, boundary=multipart_boundary
                )
            else:
                body, content_type = (
                    urlencode(fields),  # type: ignore[arg-type]
                    "application/x-www-form-urlencoded",
                )

            extra_kw["body"] = body
            # Don't clobber an explicit caller-supplied Content-Type.
            extra_kw["headers"].setdefault("Content-Type", content_type)

        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
llmeval-env/lib/python3.10/site-packages/urllib3/_version.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# This file is protected via CODEOWNERS
from __future__ import annotations

# Single source of truth for the package version string.
__version__ = "2.2.1"
llmeval-env/lib/python3.10/site-packages/urllib3/connection.py ADDED
@@ -0,0 +1,930 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import datetime
4
+ import logging
5
+ import os
6
+ import re
7
+ import socket
8
+ import sys
9
+ import typing
10
+ import warnings
11
+ from http.client import HTTPConnection as _HTTPConnection
12
+ from http.client import HTTPException as HTTPException # noqa: F401
13
+ from http.client import ResponseNotReady
14
+ from socket import timeout as SocketTimeout
15
+
16
+ if typing.TYPE_CHECKING:
17
+ from typing import Literal
18
+
19
+ from .response import HTTPResponse
20
+ from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
21
+ from .util.ssltransport import SSLTransport
22
+
23
+ from ._collections import HTTPHeaderDict
24
+ from .util.response import assert_header_parsing
25
+ from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
26
+ from .util.util import to_str
27
+ from .util.wait import wait_for_read
28
+
29
try:  # Compiled with SSL?
    import ssl

    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
    # Python built without SSL support: keep the name importable so the rest
    # of the module can reference it, but make the exception unmatchable.
    ssl = None  # type: ignore[assignment]

    class BaseSSLError(BaseException):  # type: ignore[no-redef]
        pass
38
+
39
+
40
+ from ._base_connection import _TYPE_BODY
41
+ from ._base_connection import ProxyConfig as ProxyConfig
42
+ from ._base_connection import _ResponseOptions as _ResponseOptions
43
+ from ._version import __version__
44
+ from .exceptions import (
45
+ ConnectTimeoutError,
46
+ HeaderParsingError,
47
+ NameResolutionError,
48
+ NewConnectionError,
49
+ ProxyError,
50
+ SystemTimeWarning,
51
+ )
52
+ from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
53
+ from .util.request import body_to_chunks
54
+ from .util.ssl_ import assert_fingerprint as _assert_fingerprint
55
+ from .util.ssl_ import (
56
+ create_urllib3_context,
57
+ is_ipaddress,
58
+ resolve_cert_reqs,
59
+ resolve_ssl_version,
60
+ ssl_wrap_socket,
61
+ )
62
+ from .util.ssl_match_hostname import CertificateError, match_hostname
63
+ from .util.url import Url
64
+
65
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
BrokenPipeError = BrokenPipeError


log = logging.getLogger(__name__)

# Default port for each supported scheme.
port_by_scheme = {"http": 80, "https": 443}

# When it comes time to update this value as a part of regular maintenance
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2023, 6, 1)

# Matches any character that is NOT allowed in an HTTP token (RFC 7230 method
# names); used to reject malformed request methods.
_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")

# sys.audit is only available on Python 3.8+.
_HAS_SYS_AUDIT = hasattr(sys, "audit")
81
+
82
+
83
+ class HTTPConnection(_HTTPConnection):
84
+ """
85
+ Based on :class:`http.client.HTTPConnection` but provides an extra constructor
86
+ backwards-compatibility layer between older and newer Pythons.
87
+
88
+ Additional keyword parameters are used to configure attributes of the connection.
89
+ Accepted parameters include:
90
+
91
+ - ``source_address``: Set the source address for the current connection.
92
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
93
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
94
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
95
+
96
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
97
+ you might pass:
98
+
99
+ .. code-block:: python
100
+
101
+ HTTPConnection.default_socket_options + [
102
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
103
+ ]
104
+
105
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
106
+ """
107
+
108
+ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc]
109
+
110
+ #: Disable Nagle's algorithm by default.
111
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
112
+ default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [
113
+ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
114
+ ]
115
+
116
+ #: Whether this connection verifies the host's certificate.
117
+ is_verified: bool = False
118
+
119
+ #: Whether this proxy connection verified the proxy host's certificate.
120
+ # If no proxy is currently connected to the value will be ``None``.
121
+ proxy_is_verified: bool | None = None
122
+
123
+ blocksize: int
124
+ source_address: tuple[str, int] | None
125
+ socket_options: connection._TYPE_SOCKET_OPTIONS | None
126
+
127
+ _has_connected_to_proxy: bool
128
+ _response_options: _ResponseOptions | None
129
+ _tunnel_host: str | None
130
+ _tunnel_port: int | None
131
+ _tunnel_scheme: str | None
132
+
133
+ def __init__(
134
+ self,
135
+ host: str,
136
+ port: int | None = None,
137
+ *,
138
+ timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
139
+ source_address: tuple[str, int] | None = None,
140
+ blocksize: int = 16384,
141
+ socket_options: None
142
+ | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,
143
+ proxy: Url | None = None,
144
+ proxy_config: ProxyConfig | None = None,
145
+ ) -> None:
146
+ super().__init__(
147
+ host=host,
148
+ port=port,
149
+ timeout=Timeout.resolve_default_timeout(timeout),
150
+ source_address=source_address,
151
+ blocksize=blocksize,
152
+ )
153
+ self.socket_options = socket_options
154
+ self.proxy = proxy
155
+ self.proxy_config = proxy_config
156
+
157
+ self._has_connected_to_proxy = False
158
+ self._response_options = None
159
+ self._tunnel_host: str | None = None
160
+ self._tunnel_port: int | None = None
161
+ self._tunnel_scheme: str | None = None
162
+
163
+ @property
164
+ def host(self) -> str:
165
+ """
166
+ Getter method to remove any trailing dots that indicate the hostname is an FQDN.
167
+
168
+ In general, SSL certificates don't include the trailing dot indicating a
169
+ fully-qualified domain name, and thus, they don't validate properly when
170
+ checked against a domain name that includes the dot. In addition, some
171
+ servers may not expect to receive the trailing dot when provided.
172
+
173
+ However, the hostname with trailing dot is critical to DNS resolution; doing a
174
+ lookup with the trailing dot will properly only resolve the appropriate FQDN,
175
+ whereas a lookup without a trailing dot will search the system's search domain
176
+ list. Thus, it's important to keep the original host around for use only in
177
+ those cases where it's appropriate (i.e., when doing DNS lookup to establish the
178
+ actual TCP connection across which we're going to send HTTP requests).
179
+ """
180
+ return self._dns_host.rstrip(".")
181
+
182
+ @host.setter
183
+ def host(self, value: str) -> None:
184
+ """
185
+ Setter for the `host` property.
186
+
187
+ We assume that only urllib3 uses the _dns_host attribute; httplib itself
188
+ only uses `host`, and it seems reasonable that other libraries follow suit.
189
+ """
190
+ self._dns_host = value
191
+
192
+ def _new_conn(self) -> socket.socket:
193
+ """Establish a socket connection and set nodelay settings on it.
194
+
195
+ :return: New socket connection.
196
+ """
197
+ try:
198
+ sock = connection.create_connection(
199
+ (self._dns_host, self.port),
200
+ self.timeout,
201
+ source_address=self.source_address,
202
+ socket_options=self.socket_options,
203
+ )
204
+ except socket.gaierror as e:
205
+ raise NameResolutionError(self.host, self, e) from e
206
+ except SocketTimeout as e:
207
+ raise ConnectTimeoutError(
208
+ self,
209
+ f"Connection to {self.host} timed out. (connect timeout={self.timeout})",
210
+ ) from e
211
+
212
+ except OSError as e:
213
+ raise NewConnectionError(
214
+ self, f"Failed to establish a new connection: {e}"
215
+ ) from e
216
+
217
+ # Audit hooks are only available in Python 3.8+
218
+ if _HAS_SYS_AUDIT:
219
+ sys.audit("http.client.connect", self, self.host, self.port)
220
+
221
+ return sock
222
+
223
+ def set_tunnel(
224
+ self,
225
+ host: str,
226
+ port: int | None = None,
227
+ headers: typing.Mapping[str, str] | None = None,
228
+ scheme: str = "http",
229
+ ) -> None:
230
+ if scheme not in ("http", "https"):
231
+ raise ValueError(
232
+ f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'"
233
+ )
234
+ super().set_tunnel(host, port=port, headers=headers)
235
+ self._tunnel_scheme = scheme
236
+
237
+ def connect(self) -> None:
238
+ self.sock = self._new_conn()
239
+ if self._tunnel_host:
240
+ # If we're tunneling it means we're connected to our proxy.
241
+ self._has_connected_to_proxy = True
242
+
243
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
244
+ self._tunnel() # type: ignore[attr-defined]
245
+
246
+ # If there's a proxy to be connected to we are fully connected.
247
+ # This is set twice (once above and here) due to forwarding proxies
248
+ # not using tunnelling.
249
+ self._has_connected_to_proxy = bool(self.proxy)
250
+
251
+ if self._has_connected_to_proxy:
252
+ self.proxy_is_verified = False
253
+
254
+ @property
255
+ def is_closed(self) -> bool:
256
+ return self.sock is None
257
+
258
+ @property
259
+ def is_connected(self) -> bool:
260
+ if self.sock is None:
261
+ return False
262
+ return not wait_for_read(self.sock, timeout=0.0)
263
+
264
+ @property
265
+ def has_connected_to_proxy(self) -> bool:
266
+ return self._has_connected_to_proxy
267
+
268
+ @property
269
+ def proxy_is_forwarding(self) -> bool:
270
+ """
271
+ Return True if a forwarding proxy is configured, else return False
272
+ """
273
+ return bool(self.proxy) and self._tunnel_host is None
274
+
275
+ def close(self) -> None:
276
+ try:
277
+ super().close()
278
+ finally:
279
+ # Reset all stateful properties so connection
280
+ # can be re-used without leaking prior configs.
281
+ self.sock = None
282
+ self.is_verified = False
283
+ self.proxy_is_verified = None
284
+ self._has_connected_to_proxy = False
285
+ self._response_options = None
286
+ self._tunnel_host = None
287
+ self._tunnel_port = None
288
+ self._tunnel_scheme = None
289
+
290
+ def putrequest(
291
+ self,
292
+ method: str,
293
+ url: str,
294
+ skip_host: bool = False,
295
+ skip_accept_encoding: bool = False,
296
+ ) -> None:
297
+ """"""
298
+ # Empty docstring because the indentation of CPython's implementation
299
+ # is broken but we don't want this method in our documentation.
300
+ match = _CONTAINS_CONTROL_CHAR_RE.search(method)
301
+ if match:
302
+ raise ValueError(
303
+ f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})"
304
+ )
305
+
306
+ return super().putrequest(
307
+ method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding
308
+ )
309
+
310
+ def putheader(self, header: str, *values: str) -> None: # type: ignore[override]
311
+ """"""
312
+ if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
313
+ super().putheader(header, *values)
314
+ elif to_str(header.lower()) not in SKIPPABLE_HEADERS:
315
+ skippable_headers = "', '".join(
316
+ [str.title(header) for header in sorted(SKIPPABLE_HEADERS)]
317
+ )
318
+ raise ValueError(
319
+ f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'"
320
+ )
321
+
322
+ # `request` method's signature intentionally violates LSP.
323
+ # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental.
324
+ def request( # type: ignore[override]
325
+ self,
326
+ method: str,
327
+ url: str,
328
+ body: _TYPE_BODY | None = None,
329
+ headers: typing.Mapping[str, str] | None = None,
330
+ *,
331
+ chunked: bool = False,
332
+ preload_content: bool = True,
333
+ decode_content: bool = True,
334
+ enforce_content_length: bool = True,
335
+ ) -> None:
336
+ # Update the inner socket's timeout value to send the request.
337
+ # This only triggers if the connection is re-used.
338
+ if self.sock is not None:
339
+ self.sock.settimeout(self.timeout)
340
+
341
+ # Store these values to be fed into the HTTPResponse
342
+ # object later. TODO: Remove this in favor of a real
343
+ # HTTP lifecycle mechanism.
344
+
345
+ # We have to store these before we call .request()
346
+ # because sometimes we can still salvage a response
347
+ # off the wire even if we aren't able to completely
348
+ # send the request body.
349
+ self._response_options = _ResponseOptions(
350
+ request_method=method,
351
+ request_url=url,
352
+ preload_content=preload_content,
353
+ decode_content=decode_content,
354
+ enforce_content_length=enforce_content_length,
355
+ )
356
+
357
+ if headers is None:
358
+ headers = {}
359
+ header_keys = frozenset(to_str(k.lower()) for k in headers)
360
+ skip_accept_encoding = "accept-encoding" in header_keys
361
+ skip_host = "host" in header_keys
362
+ self.putrequest(
363
+ method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
364
+ )
365
+
366
+ # Transform the body into an iterable of sendall()-able chunks
367
+ # and detect if an explicit Content-Length is doable.
368
+ chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize)
369
+ chunks = chunks_and_cl.chunks
370
+ content_length = chunks_and_cl.content_length
371
+
372
+ # When chunked is explicit set to 'True' we respect that.
373
+ if chunked:
374
+ if "transfer-encoding" not in header_keys:
375
+ self.putheader("Transfer-Encoding", "chunked")
376
+ else:
377
+ # Detect whether a framing mechanism is already in use. If so
378
+ # we respect that value, otherwise we pick chunked vs content-length
379
+ # depending on the type of 'body'.
380
+ if "content-length" in header_keys:
381
+ chunked = False
382
+ elif "transfer-encoding" in header_keys:
383
+ chunked = True
384
+
385
+ # Otherwise we go off the recommendation of 'body_to_chunks()'.
386
+ else:
387
+ chunked = False
388
+ if content_length is None:
389
+ if chunks is not None:
390
+ chunked = True
391
+ self.putheader("Transfer-Encoding", "chunked")
392
+ else:
393
+ self.putheader("Content-Length", str(content_length))
394
+
395
+ # Now that framing headers are out of the way we send all the other headers.
396
+ if "user-agent" not in header_keys:
397
+ self.putheader("User-Agent", _get_default_user_agent())
398
+ for header, value in headers.items():
399
+ self.putheader(header, value)
400
+ self.endheaders()
401
+
402
+ # If we're given a body we start sending that in chunks.
403
+ if chunks is not None:
404
+ for chunk in chunks:
405
+ # Sending empty chunks isn't allowed for TE: chunked
406
+ # as it indicates the end of the body.
407
+ if not chunk:
408
+ continue
409
+ if isinstance(chunk, str):
410
+ chunk = chunk.encode("utf-8")
411
+ if chunked:
412
+ self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk))
413
+ else:
414
+ self.send(chunk)
415
+
416
+ # Regardless of whether we have a body or not, if we're in
417
+ # chunked mode we want to send an explicit empty chunk.
418
+ if chunked:
419
+ self.send(b"0\r\n\r\n")
420
+
421
+ def request_chunked(
422
+ self,
423
+ method: str,
424
+ url: str,
425
+ body: _TYPE_BODY | None = None,
426
+ headers: typing.Mapping[str, str] | None = None,
427
+ ) -> None:
428
+ """
429
+ Alternative to the common request method, which sends the
430
+ body with chunked encoding and not as one block
431
+ """
432
+ warnings.warn(
433
+ "HTTPConnection.request_chunked() is deprecated and will be removed "
434
+ "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).",
435
+ category=DeprecationWarning,
436
+ stacklevel=2,
437
+ )
438
+ self.request(method, url, body=body, headers=headers, chunked=True)
439
+
440
+ def getresponse( # type: ignore[override]
441
+ self,
442
+ ) -> HTTPResponse:
443
+ """
444
+ Get the response from the server.
445
+
446
+ If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable.
447
+
448
+ If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed.
449
+ """
450
+ # Raise the same error as http.client.HTTPConnection
451
+ if self._response_options is None:
452
+ raise ResponseNotReady()
453
+
454
+ # Reset this attribute for being used again.
455
+ resp_options = self._response_options
456
+ self._response_options = None
457
+
458
+ # Since the connection's timeout value may have been updated
459
+ # we need to set the timeout on the socket.
460
+ self.sock.settimeout(self.timeout)
461
+
462
+ # This is needed here to avoid circular import errors
463
+ from .response import HTTPResponse
464
+
465
+ # Get the response from http.client.HTTPConnection
466
+ httplib_response = super().getresponse()
467
+
468
+ try:
469
+ assert_header_parsing(httplib_response.msg)
470
+ except (HeaderParsingError, TypeError) as hpe:
471
+ log.warning(
472
+ "Failed to parse headers (url=%s): %s",
473
+ _url_from_connection(self, resp_options.request_url),
474
+ hpe,
475
+ exc_info=True,
476
+ )
477
+
478
+ headers = HTTPHeaderDict(httplib_response.msg.items())
479
+
480
+ response = HTTPResponse(
481
+ body=httplib_response,
482
+ headers=headers,
483
+ status=httplib_response.status,
484
+ version=httplib_response.version,
485
+ reason=httplib_response.reason,
486
+ preload_content=resp_options.preload_content,
487
+ decode_content=resp_options.decode_content,
488
+ original_response=httplib_response,
489
+ enforce_content_length=resp_options.enforce_content_length,
490
+ request_method=resp_options.request_method,
491
+ request_url=resp_options.request_url,
492
+ )
493
+ return response
494
+
495
+
496
class HTTPSConnection(HTTPConnection):
    """
    Many of the parameters to this constructor are passed to the underlying SSL
    socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
    """

    default_port = port_by_scheme["https"]  # type: ignore[misc]

    # TLS configuration defaults; instances override these in __init__.
    cert_reqs: int | str | None = None
    ca_certs: str | None = None
    ca_cert_dir: str | None = None
    ca_cert_data: None | str | bytes = None
    ssl_version: int | str | None = None
    ssl_minimum_version: int | None = None
    ssl_maximum_version: int | None = None
    assert_fingerprint: str | None = None

    def __init__(
        self,
        host: str,
        port: int | None = None,
        *,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        source_address: tuple[str, int] | None = None,
        blocksize: int = 16384,
        socket_options: None
        | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,
        proxy: Url | None = None,
        proxy_config: ProxyConfig | None = None,
        cert_reqs: int | str | None = None,
        assert_hostname: None | str | Literal[False] = None,
        assert_fingerprint: str | None = None,
        server_hostname: str | None = None,
        ssl_context: ssl.SSLContext | None = None,
        ca_certs: str | None = None,
        ca_cert_dir: str | None = None,
        ca_cert_data: None | str | bytes = None,
        ssl_minimum_version: int | None = None,
        ssl_maximum_version: int | None = None,
        ssl_version: int | str | None = None,  # Deprecated
        cert_file: str | None = None,
        key_file: str | None = None,
        key_password: str | None = None,
    ) -> None:
        super().__init__(
            host,
            port=port,
            timeout=timeout,
            source_address=source_address,
            blocksize=blocksize,
            socket_options=socket_options,
            proxy=proxy,
            proxy_config=proxy_config,
        )

        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password
        self.ssl_context = ssl_context
        self.server_hostname = server_hostname
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.ssl_version = ssl_version
        self.ssl_minimum_version = ssl_minimum_version
        self.ssl_maximum_version = ssl_maximum_version
        # Expand '~' so user-relative CA paths work.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
        self.ca_cert_data = ca_cert_data

        # cert_reqs depends on ssl_context so calculate last.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)
        self.cert_reqs = cert_reqs

    def set_cert(
        self,
        key_file: str | None = None,
        cert_file: str | None = None,
        cert_reqs: int | str | None = None,
        key_password: str | None = None,
        ca_certs: str | None = None,
        assert_hostname: None | str | Literal[False] = None,
        assert_fingerprint: str | None = None,
        ca_cert_dir: str | None = None,
        ca_cert_data: None | str | bytes = None,
    ) -> None:
        """
        This method should only be called once, before the connection is used.
        """
        warnings.warn(
            "HTTPSConnection.set_cert() is deprecated and will be removed "
            "in urllib3 v2.1.0. Instead provide the parameters to the "
            "HTTPSConnection constructor.",
            category=DeprecationWarning,
            stacklevel=2,
        )

        # When cert_reqs isn't given, assume CERT_REQUIRED unless an
        # SSLContext is set, in which case its verify_mode wins.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
        self.ca_cert_data = ca_cert_data

    def connect(self) -> None:
        """Open the TCP connection, tunnel if configured, then wrap in TLS."""
        sock: socket.socket | ssl.SSLSocket
        self.sock = sock = self._new_conn()
        server_hostname: str = self.host
        tls_in_tls = False

        # Do we need to establish a tunnel?
        if self._tunnel_host is not None:
            if self._tunnel_scheme == "https":
                # HTTPS origin through an HTTPS proxy means TLS-in-TLS.
                # _connect_tls_proxy will verify and assign proxy_is_verified.
                self.sock = sock = self._connect_tls_proxy(self.host, sock)
                tls_in_tls = True
            elif self._tunnel_scheme == "http":
                self.proxy_is_verified = False

            # The hop to the proxy succeeded, so we're connected to it.
            self._has_connected_to_proxy = True

            self._tunnel()  # type: ignore[attr-defined]
            # Certificate checks must target the origin, not the proxy.
            server_hostname = self._tunnel_host

        if self.server_hostname is not None:
            server_hostname = self.server_hostname

        if datetime.date.today() < RECENT_DATE:
            # A badly skewed clock makes certificate validity checks fail
            # in confusing ways -- warn about it up front.
            warnings.warn(
                (
                    f"System time is way off (before {RECENT_DATE}). This will probably "
                    "lead to SSL verification errors"
                ),
                SystemTimeWarning,
            )

        # Trailing dots on FQDNs are valid in DNS but break cert validation.
        server_hostname_rm_dot = server_hostname.rstrip(".")

        sock_and_verified = _ssl_wrap_socket_and_match_hostname(
            sock=sock,
            cert_reqs=self.cert_reqs,
            ssl_version=self.ssl_version,
            ssl_minimum_version=self.ssl_minimum_version,
            ssl_maximum_version=self.ssl_maximum_version,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            server_hostname=server_hostname_rm_dot,
            ssl_context=self.ssl_context,
            tls_in_tls=tls_in_tls,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
        )
        self.sock = sock_and_verified.socket

        # A forwarding proxy performs TLS with the target on our behalf,
        # so the target can never be considered verified here. A CONNECT
        # tunnel is required for target verification.
        # See: https://github.com/urllib3/urllib3/issues/3267.
        if self.proxy_is_forwarding:
            self.is_verified = False
        else:
            self.is_verified = sock_and_verified.is_verified

        # Set twice (also in the tunnel branch above) because forwarding
        # proxies never take that branch.
        self._has_connected_to_proxy = bool(self.proxy)

        # Only fill in proxy_is_verified if tunnel establishment didn't.
        if self._has_connected_to_proxy and self.proxy_is_verified is None:
            self.proxy_is_verified = sock_and_verified.is_verified

    def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:
        """
        Establish a TLS connection to the proxy using the provided SSL context.
        """
        # Only called when self._tunnel_host is truthy, so proxy_config is set.
        proxy_config = typing.cast(ProxyConfig, self.proxy_config)
        sock_and_verified = _ssl_wrap_socket_and_match_hostname(
            sock,
            cert_reqs=self.cert_reqs,
            ssl_version=self.ssl_version,
            ssl_minimum_version=self.ssl_minimum_version,
            ssl_maximum_version=self.ssl_maximum_version,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            server_hostname=hostname,
            ssl_context=proxy_config.ssl_context,
            assert_hostname=proxy_config.assert_hostname,
            assert_fingerprint=proxy_config.assert_fingerprint,
            # Features that aren't implemented for proxies yet:
            cert_file=None,
            key_file=None,
            key_password=None,
            tls_in_tls=False,
        )
        self.proxy_is_verified = sock_and_verified.is_verified
        return sock_and_verified.socket  # type: ignore[return-value]
+ class _WrappedAndVerifiedSocket(typing.NamedTuple):
723
+ """
724
+ Wrapped socket and whether the connection is
725
+ verified after the TLS handshake
726
+ """
727
+
728
+ socket: ssl.SSLSocket | SSLTransport
729
+ is_verified: bool
730
+
731
+
732
def _ssl_wrap_socket_and_match_hostname(
    sock: socket.socket,
    *,
    cert_reqs: None | str | int,
    ssl_version: None | str | int,
    ssl_minimum_version: int | None,
    ssl_maximum_version: int | None,
    cert_file: str | None,
    key_file: str | None,
    key_password: str | None,
    ca_certs: str | None,
    ca_cert_dir: str | None,
    ca_cert_data: None | str | bytes,
    assert_hostname: None | str | Literal[False],
    assert_fingerprint: str | None,
    server_hostname: str | None,
    ssl_context: ssl.SSLContext | None,
    tls_in_tls: bool = False,
) -> _WrappedAndVerifiedSocket:
    """Logic for constructing an SSLContext from all TLS parameters, passing
    that down into ssl_wrap_socket, and then doing certificate verification
    either via hostname or fingerprint. This function exists to guarantee
    that both proxies and targets have the same behavior when connecting via TLS.
    """
    # Build a context only when the caller didn't supply one; remember
    # which case we're in because verification decisions below depend on it.
    using_default_context = ssl_context is None
    if using_default_context:
        context = create_urllib3_context(
            ssl_version=resolve_ssl_version(ssl_version),
            ssl_minimum_version=ssl_minimum_version,
            ssl_maximum_version=ssl_maximum_version,
            cert_reqs=resolve_cert_reqs(cert_reqs),
        )
    else:
        context = ssl_context

    context.verify_mode = resolve_cert_reqs(cert_reqs)

    # In some cases we want to verify hostnames ourselves rather than
    # letting the ssl module do it:
    if (
        # `ssl` can't verify fingerprints or alternate hostnames
        assert_fingerprint
        or assert_hostname
        # assert_hostname can be set to False to disable hostname checking
        or assert_hostname is False
        # We still support OpenSSL 1.0.2, which prevents us from verifying
        # hostnames easily: https://github.com/pyca/pyopenssl/pull/933
        or ssl_.IS_PYOPENSSL
        or not ssl_.HAS_NEVER_CHECK_COMMON_NAME
    ):
        context.check_hostname = False

    # Try to load OS default certs if none are given. hasattr() is needed
    # because custom pyOpenSSL SSLContext objects don't support
    # load_default_certs().
    if (
        not ca_certs
        and not ca_cert_dir
        and not ca_cert_data
        and using_default_context
        and hasattr(context, "load_default_certs")
    ):
        context.load_default_certs()

    # Normalize IPv6 addresses: strip brackets and any scope ID, because
    # Python's ssl module would otherwise treat scoped IPv6 addresses as
    # DNS hostnames.
    if server_hostname is not None:
        stripped = server_hostname.strip("[]")
        if "%" in stripped:
            stripped = stripped[: stripped.rfind("%")]
        if is_ipaddress(stripped):
            server_hostname = stripped

    ssl_sock = ssl_wrap_socket(
        sock=sock,
        keyfile=key_file,
        certfile=cert_file,
        key_password=key_password,
        ca_certs=ca_certs,
        ca_cert_dir=ca_cert_dir,
        ca_cert_data=ca_cert_data,
        server_hostname=server_hostname,
        ssl_context=context,
        tls_in_tls=tls_in_tls,
    )

    try:
        if assert_fingerprint:
            _assert_fingerprint(
                ssl_sock.getpeercert(binary_form=True), assert_fingerprint
            )
        elif (
            context.verify_mode != ssl.CERT_NONE
            and not context.check_hostname
            and assert_hostname is not False
        ):
            cert: _TYPE_PEER_CERT_RET_DICT = ssl_sock.getpeercert()  # type: ignore[assignment]

            # Signal to match_hostname whether 'commonName' may be used.
            # With our own constructed SSLContext we explicitly pass False
            # because PyPy hard-codes True for
            # SSLContext.hostname_checks_common_name.
            if using_default_context:
                hostname_checks_common_name = False
            else:
                hostname_checks_common_name = (
                    getattr(context, "hostname_checks_common_name", False) or False
                )

            _match_hostname(
                cert,
                assert_hostname or server_hostname,  # type: ignore[arg-type]
                hostname_checks_common_name,
            )

        return _WrappedAndVerifiedSocket(
            socket=ssl_sock,
            is_verified=context.verify_mode == ssl.CERT_REQUIRED
            or bool(assert_fingerprint),
        )
    except BaseException:
        # Never leak a half-verified TLS socket.
        ssl_sock.close()
        raise
def _match_hostname(
    cert: _TYPE_PEER_CERT_RET_DICT | None,
    asserted_hostname: str,
    hostname_checks_common_name: bool = False,
) -> None:
    """Verify *cert* against *asserted_hostname*, logging and re-raising on failure."""
    # Our upstream ssl.match_hostname() applies bracket-stripping only to
    # IP addresses (so it can't accidentally match DNS SANs) -- do the same.
    candidate = asserted_hostname.strip("[]")
    if is_ipaddress(candidate):
        asserted_hostname = candidate

    try:
        match_hostname(cert, asserted_hostname, hostname_checks_common_name)
    except CertificateError as e:
        log.warning(
            "Certificate did not match expected hostname: %s. Certificate: %s",
            asserted_hostname,
            cert,
        )
        # Attach the certificate to the exception so callers catching it
        # can inspect what the peer actually presented.
        e._peer_cert = cert  # type: ignore[attr-defined]
        raise
def _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:
    """Wrap *err* in a ProxyError, hinting when the proxy is likely HTTP-only."""
    # TLS failures containing these phrases almost always mean the "https"
    # proxy actually speaks plain HTTP -- a configuration mistake we can
    # point out explicitly.
    normalized = " ".join(re.split("[^a-z]", str(err).lower()))
    looks_like_http_proxy = (
        "wrong version number" in normalized
        or "unknown protocol" in normalized
        or "record layer failure" in normalized
    )
    http_proxy_warning = (
        ". Your proxy appears to only use HTTP and not HTTPS, "
        "try changing your proxy URL to be HTTP. See: "
        "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
        "#https-proxy-error-http-proxy"
    )
    hint = (
        http_proxy_warning
        if looks_like_http_proxy and proxy_scheme == "https"
        else ""
    )
    new_err = ProxyError(f"Unable to connect to proxy{hint}", err)
    new_err.__cause__ = err
    return new_err
def _get_default_user_agent() -> str:
    """Return the User-Agent value urllib3 sends when the caller gives none."""
    return f"python-urllib3/{__version__}"
class DummyConnection:
    """Used to detect a failed ConnectionCls import."""
# Without a working ssl module there can be no real HTTPSConnection:
# substitute the sentinel class so HTTPS use fails detectably.
if not ssl:
    HTTPSConnection = DummyConnection  # type: ignore[misc, assignment] # noqa: F811


# Deprecated alias kept for backwards compatibility.
VerifiedHTTPSConnection = HTTPSConnection
def _url_from_connection(
    conn: HTTPConnection | HTTPSConnection, path: str | None = None
) -> str:
    """Returns the URL from a given connection. This is mainly used for testing and logging."""
    # Infer the scheme from the connection's class rather than storing it.
    scheme = "https" if isinstance(conn, HTTPSConnection) else "http"
    return Url(scheme=scheme, host=conn.host, port=conn.port, path=path).url
llmeval-env/lib/python3.10/site-packages/urllib3/connectionpool.py ADDED
@@ -0,0 +1,1186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import errno
4
+ import logging
5
+ import queue
6
+ import sys
7
+ import typing
8
+ import warnings
9
+ import weakref
10
+ from socket import timeout as SocketTimeout
11
+ from types import TracebackType
12
+
13
+ from ._base_connection import _TYPE_BODY
14
+ from ._collections import HTTPHeaderDict
15
+ from ._request_methods import RequestMethods
16
+ from .connection import (
17
+ BaseSSLError,
18
+ BrokenPipeError,
19
+ DummyConnection,
20
+ HTTPConnection,
21
+ HTTPException,
22
+ HTTPSConnection,
23
+ ProxyConfig,
24
+ _wrap_proxy_error,
25
+ )
26
+ from .connection import port_by_scheme as port_by_scheme
27
+ from .exceptions import (
28
+ ClosedPoolError,
29
+ EmptyPoolError,
30
+ FullPoolError,
31
+ HostChangedError,
32
+ InsecureRequestWarning,
33
+ LocationValueError,
34
+ MaxRetryError,
35
+ NewConnectionError,
36
+ ProtocolError,
37
+ ProxyError,
38
+ ReadTimeoutError,
39
+ SSLError,
40
+ TimeoutError,
41
+ )
42
+ from .response import BaseHTTPResponse
43
+ from .util.connection import is_connection_dropped
44
+ from .util.proxy import connection_requires_http_tunnel
45
+ from .util.request import _TYPE_BODY_POSITION, set_file_position
46
+ from .util.retry import Retry
47
+ from .util.ssl_match_hostname import CertificateError
48
+ from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout
49
+ from .util.url import Url, _encode_target
50
+ from .util.url import _normalize_host as normalize_host
51
+ from .util.url import parse_url
52
+ from .util.util import to_str
53
+
54
+ if typing.TYPE_CHECKING:
55
+ import ssl
56
+ from typing import Literal
57
+
58
+ from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
59
+
60
log = logging.getLogger(__name__)

# A timeout may be a Timeout object, a bare number of seconds, the
# _TYPE_DEFAULT sentinel, or None (no timeout at all).
_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]

# TypeVar so context-manager __enter__ returns the concrete subclass type.
_SelfT = typing.TypeVar("_SelfT")
# Pool objects
class ConnectionPool:
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.

    .. note::
       ConnectionPool.urlopen() does not normalize or percent-encode target URIs
       which is useful if your target server doesn't support percent-encoded
       target URIs.
    """

    scheme: str | None = None
    QueueCls = queue.LifoQueue

    def __init__(self, host: str, port: int | None = None) -> None:
        if not host:
            raise LocationValueError("No host specified.")

        self.host = _normalize_host(host, scheme=self.scheme)
        self.port = port

        # Uses 'normalize_host()' (not '_normalize_host()') so square
        # brackets around IPv6 addresses are preserved: HTTP CONNECT
        # tunneling (HTTPConnection.set_tunnel) requires them.
        self._tunnel_host = normalize_host(host, scheme=self.scheme).lower()

    def __str__(self) -> str:
        return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})"

    def __enter__(self: _SelfT) -> _SelfT:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> Literal[False]:
        self.close()
        # Returning False lets any in-flight exception propagate.
        return False

    def close(self) -> None:
        """
        Close all pooled connections and disable the pool.
        """
117
+ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
118
+ _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
119
+
120
+
121
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`http.client.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`http.client.HTTPConnection`.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to False, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param retries:
        Retry configuration to use by default with requests in this pool.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.ProxyManager`

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.ProxyManager`

    :param \\**conn_kw:
        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
        :class:`urllib3.connection.HTTPSConnection` instances.
    """

    # URL scheme this pool serves; the HTTPS subclass overrides it.
    scheme = "http"
    # Connection class instantiated by :meth:`_new_conn`; overridden by the
    # HTTPS subclass (and swappable for testing).
    ConnectionCls: (
        type[BaseHTTPConnection] | type[BaseHTTPSConnection]
    ) = HTTPConnection
178
    def __init__(
        self,
        host: str,
        port: int | None = None,
        timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
        maxsize: int = 1,
        block: bool = False,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        _proxy: Url | None = None,
        _proxy_headers: typing.Mapping[str, str] | None = None,
        _proxy_config: ProxyConfig | None = None,
        **conn_kw: typing.Any,
    ):
        """Create a pool for ``host``/``port``; see the class docstring for the
        meaning of each parameter."""
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        # Normalize raw int/float timeouts so the rest of the pool can always
        # assume a Timeout object with separate connect/read values.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        self.proxy_config = _proxy_config

        # Fill the queue up so that doing get() on it will block properly
        for _ in range(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])

            self.conn_kw["proxy"] = self.proxy
            self.conn_kw["proxy_config"] = self.proxy_config

        # Do not pass 'self' as callback to 'finalize'.
        # Then the 'finalize' would keep an endless living (leak) to self.
        # By just passing a reference to the pool allows the garbage collector
        # to free self if nobody else has a reference to it.
        pool = self.pool

        # Close all the HTTPConnections in the pool before the
        # HTTPConnectionPool object is garbage collected.
        weakref.finalize(self, _close_pool_connections, pool)
239
+ def _new_conn(self) -> BaseHTTPConnection:
240
+ """
241
+ Return a fresh :class:`HTTPConnection`.
242
+ """
243
+ self.num_connections += 1
244
+ log.debug(
245
+ "Starting new HTTP connection (%d): %s:%s",
246
+ self.num_connections,
247
+ self.host,
248
+ self.port or "80",
249
+ )
250
+
251
+ conn = self.ConnectionCls(
252
+ host=self.host,
253
+ port=self.port,
254
+ timeout=self.timeout.connect_timeout,
255
+ **self.conn_kw,
256
+ )
257
+ return conn
258
+
259
    def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None

        if self.pool is None:
            raise ClosedPoolError(self, "Pool is closed.")

        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        # close() may run concurrently and set self.pool to None between the
        # check above and the get() call.
        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.") from None  # Defensive:

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(
                    self,
                    "Pool is empty and a new connection can't be opened due to blocking mode.",
                ) from None
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()

        # A pooled slot may hold None (pre-filled placeholder); fall back to a
        # fresh connection in that case.
        return conn or self._new_conn()
297
    def _put_conn(self, conn: BaseHTTPConnection | None) -> None:
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        if self.pool is not None:
            try:
                self.pool.put(conn, block=False)
                return  # Everything is dandy, done.
            except AttributeError:
                # self.pool is None.
                pass
            except queue.Full:
                # Connection never got put back into the pool, close it.
                if conn:
                    conn.close()

                if self.block:
                    # This should never happen if you got the conn from self._get_conn
                    raise FullPoolError(
                        self,
                        "Pool reached maximum size and no more connections are allowed.",
                    ) from None

                log.warning(
                    "Connection pool is full, discarding connection: %s. Connection pool size: %s",
                    self.host,
                    self.pool.qsize(),
                )

        # Reached when the pool is closed (None) or full without blocking:
        # the connection never got put back into the pool, close it.
        if conn:
            conn.close()
340
    def _validate_conn(self, conn: BaseHTTPConnection) -> None:
        """
        Called right before a request is made, after the socket is created.
        """
        # Subclass hook: intentionally a no-op for plain HTTP (the HTTPS pool
        # overrides this to force the TLS handshake and emit warnings).
345
+ def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:
346
+ # Nothing to do for HTTP connections.
347
+ pass
348
+
349
+ def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:
350
+ """Helper that always returns a :class:`urllib3.util.Timeout`"""
351
+ if timeout is _DEFAULT_TIMEOUT:
352
+ return self.timeout.clone()
353
+
354
+ if isinstance(timeout, Timeout):
355
+ return timeout.clone()
356
+ else:
357
+ # User passed us an int/float. This is for backwards compatibility,
358
+ # can be removed later
359
+ return Timeout.from_float(timeout)
360
+
361
+ def _raise_timeout(
362
+ self,
363
+ err: BaseSSLError | OSError | SocketTimeout,
364
+ url: str,
365
+ timeout_value: _TYPE_TIMEOUT | None,
366
+ ) -> None:
367
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
368
+
369
+ if isinstance(err, SocketTimeout):
370
+ raise ReadTimeoutError(
371
+ self, url, f"Read timed out. (read timeout={timeout_value})"
372
+ ) from err
373
+
374
+ # See the above comment about EAGAIN in Python 3.
375
+ if hasattr(err, "errno") and err.errno in _blocking_errnos:
376
+ raise ReadTimeoutError(
377
+ self, url, f"Read timed out. (read timeout={timeout_value})"
378
+ ) from err
379
+
380
    def _make_request(
        self,
        conn: BaseHTTPConnection,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | None = None,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        chunked: bool = False,
        response_conn: BaseHTTPConnection | None = None,
        preload_content: bool = True,
        decode_content: bool = True,
        enforce_content_length: bool = True,
    ) -> BaseHTTPResponse:
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param response_conn:
            Set this to ``None`` if you will handle releasing the connection or
            set the connection to have the response release it.

        :param preload_content:
            If True, the response's body will be preloaded during construction.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param enforce_content_length:
            Enforce content length checking. Body returned by server must match
            value of Content-Length header, if present. Otherwise, raise error.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)

        try:
            # Trigger any extra validation we need to do.
            try:
                self._validate_conn(conn)
            except (SocketTimeout, BaseSSLError) as e:
                self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
                raise

        # _validate_conn() starts the connection to an HTTPS proxy
        # so we need to wrap errors with 'ProxyError' here too.
        except (
            OSError,
            NewConnectionError,
            TimeoutError,
            BaseSSLError,
            CertificateError,
            SSLError,
        ) as e:
            new_e: Exception = e
            if isinstance(e, (BaseSSLError, CertificateError)):
                new_e = SSLError(e)
            # If the connection didn't successfully connect to it's proxy
            # then there
            if isinstance(
                new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
            ) and (conn and conn.proxy and not conn.has_connected_to_proxy):
                new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
            raise new_e

        # conn.request() calls http.client.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        try:
            conn.request(
                method,
                url,
                body=body,
                headers=headers,
                chunked=chunked,
                preload_content=preload_content,
                decode_content=decode_content,
                enforce_content_length=enforce_content_length,
            )

        # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
        # legitimately able to close the connection after sending a valid response.
        # With this behaviour, the received response is still readable.
        except BrokenPipeError:
            pass
        except OSError as e:
            # MacOS/Linux
            # EPROTOTYPE and ECONNRESET are needed on macOS
            # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
            # Condition changed later to emit ECONNRESET instead of only EPROTOTYPE.
            if e.errno != errno.EPROTOTYPE and e.errno != errno.ECONNRESET:
                raise

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        if not conn.is_closed:
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, f"Read timed out. (read timeout={read_timeout})"
                )
            conn.timeout = read_timeout

        # Receive the response from the server
        try:
            response = conn.getresponse()
        except (BaseSSLError, OSError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # Set properties that are used by the pooling layer.
        response.retries = retries
        response._connection = response_conn  # type: ignore[attr-defined]
        response._pool = self  # type: ignore[attr-defined]

        # emscripten connection doesn't have _http_vsn_str
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            # HTTP version
            http_version,
            response.status,
            response.length_remaining,
        )

        return response
564
+ def close(self) -> None:
565
+ """
566
+ Close all pooled connections and disable the pool.
567
+ """
568
+ if self.pool is None:
569
+ return
570
+ # Disable access to the pool
571
+ old_pool, self.pool = self.pool, None
572
+
573
+ # Close all the HTTPConnections in the pool.
574
+ _close_pool_connections(old_pool)
575
+
576
    def is_same_host(self, url: str) -> bool:
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # Relative URLs always target this pool's host.
        if url.startswith("/"):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, _, host, port, *_ = parse_url(url)
        scheme = scheme or "http"
        if host is not None:
            host = _normalize_host(host, scheme=scheme)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)
598
    def urlopen(  # type: ignore[override]
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        redirect: bool = True,
        assert_same_host: bool = True,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        pool_timeout: int | None = None,
        release_conn: bool | None = None,
        chunked: bool = False,
        body_pos: _TYPE_BODY_POSITION | None = None,
        preload_content: bool = True,
        decode_content: bool = True,
        **response_kw: typing.Any,
    ) -> BaseHTTPResponse:
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method
           such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When ``False``, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param bool preload_content:
            If True, the response's body will be preloaded into memory.

        :param bool decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of ``preload_content``
            which defaults to ``True``.

        :param bool chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.
        """
        parsed_url = parse_url(url)
        destination_scheme = parsed_url.scheme

        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = preload_content

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = to_str(_encode_target(url))
        else:
            url = to_str(parsed_url.url)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn

        http_tunnel_required = connection_requires_http_tunnel(
            self.proxy, self.proxy_config, destination_scheme
        )

        # Merge the proxy headers. Only done when not using HTTP CONNECT. We
        # have to copy the headers dict so we can safely change it without those
        # changes being reflected in anyone else's copy.
        if not http_tunnel_required:
            headers = headers.copy()  # type: ignore[attr-defined]
            headers.update(self.proxy_headers)  # type: ignore[union-attr]

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout  # type: ignore[assignment]

            # Is this a closed/new connection that requires CONNECT tunnelling?
            if self.proxy is not None and http_tunnel_required and conn.is_closed:
                try:
                    self._prepare_proxy(conn)
                except (BaseSSLError, OSError, SocketTimeout) as e:
                    self._raise_timeout(
                        err=e, url=self.proxy.url, timeout_value=conn.timeout
                    )
                    raise

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Make the request on the HTTPConnection object
            response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
                retries=retries,
                response_conn=response_conn,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

            # Everything went great!
            clean_exit = True

        except EmptyPoolError:
            # Didn't get a connection from the pool, no need to clean up
            clean_exit = True
            release_this_conn = False
            raise

        except (
            TimeoutError,
            HTTPException,
            OSError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
            ProxyError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            new_e: Exception = e
            if isinstance(e, (BaseSSLError, CertificateError)):
                new_e = SSLError(e)
            if isinstance(
                new_e,
                (
                    OSError,
                    NewConnectionError,
                    TimeoutError,
                    SSLError,
                    HTTPException,
                ),
            ) and (conn and conn.proxy and not conn.has_connected_to_proxy):
                new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
            elif isinstance(new_e, (OSError, HTTPException)):
                new_e = ProtocolError("Connection aborted.", new_e)

            # increment() raises MaxRetryError once the budget is exhausted.
            retries = retries.increment(
                method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                if conn:
                    conn.close()
                    conn = None
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # Change the method according to RFC 9110, Section 15.4.4.
                method = "GET"
                # And lose the body not to transfer anything sensitive.
                body = None
                headers = HTTPHeaderDict(headers)._prepare_for_method_change()

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.headers.get("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        return response
968
+
969
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    # Overrides of the base-class scheme and connection class.
    scheme = "https"
    ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection
986
+ def __init__(
987
+ self,
988
+ host: str,
989
+ port: int | None = None,
990
+ timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
991
+ maxsize: int = 1,
992
+ block: bool = False,
993
+ headers: typing.Mapping[str, str] | None = None,
994
+ retries: Retry | bool | int | None = None,
995
+ _proxy: Url | None = None,
996
+ _proxy_headers: typing.Mapping[str, str] | None = None,
997
+ key_file: str | None = None,
998
+ cert_file: str | None = None,
999
+ cert_reqs: int | str | None = None,
1000
+ key_password: str | None = None,
1001
+ ca_certs: str | None = None,
1002
+ ssl_version: int | str | None = None,
1003
+ ssl_minimum_version: ssl.TLSVersion | None = None,
1004
+ ssl_maximum_version: ssl.TLSVersion | None = None,
1005
+ assert_hostname: str | Literal[False] | None = None,
1006
+ assert_fingerprint: str | None = None,
1007
+ ca_cert_dir: str | None = None,
1008
+ **conn_kw: typing.Any,
1009
+ ) -> None:
1010
+ super().__init__(
1011
+ host,
1012
+ port,
1013
+ timeout,
1014
+ maxsize,
1015
+ block,
1016
+ headers,
1017
+ retries,
1018
+ _proxy,
1019
+ _proxy_headers,
1020
+ **conn_kw,
1021
+ )
1022
+
1023
+ self.key_file = key_file
1024
+ self.cert_file = cert_file
1025
+ self.cert_reqs = cert_reqs
1026
+ self.key_password = key_password
1027
+ self.ca_certs = ca_certs
1028
+ self.ca_cert_dir = ca_cert_dir
1029
+ self.ssl_version = ssl_version
1030
+ self.ssl_minimum_version = ssl_minimum_version
1031
+ self.ssl_maximum_version = ssl_maximum_version
1032
+ self.assert_hostname = assert_hostname
1033
+ self.assert_fingerprint = assert_fingerprint
1034
+
1035
+ def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]
1036
+ """Establishes a tunnel connection through HTTP CONNECT."""
1037
+ if self.proxy and self.proxy.scheme == "https":
1038
+ tunnel_scheme = "https"
1039
+ else:
1040
+ tunnel_scheme = "http"
1041
+
1042
+ conn.set_tunnel(
1043
+ scheme=tunnel_scheme,
1044
+ host=self._tunnel_host,
1045
+ port=self.port,
1046
+ headers=self.proxy_headers,
1047
+ )
1048
+ conn.connect()
1049
+
1050
    def _new_conn(self) -> BaseHTTPSConnection:
        """
        Return a fresh :class:`urllib3.connection.HTTPConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )

        # DummyConnection is substituted for ConnectionCls when ssl could not
        # be imported, so this check detects a missing SSL module.
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:  # type: ignore[comparison-overlap]
            raise ImportError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )

        # When proxying, the TCP connection is made to the proxy's host/port,
        # not the destination's.
        actual_host: str = self.host
        actual_port = self.port
        if self.proxy is not None and self.proxy.host is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        return self.ConnectionCls(
            host=actual_host,
            port=actual_port,
            timeout=self.timeout.connect_timeout,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            cert_reqs=self.cert_reqs,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
            ssl_version=self.ssl_version,
            ssl_minimum_version=self.ssl_minimum_version,
            ssl_maximum_version=self.ssl_maximum_version,
            **self.conn_kw,
        )
1091
    def _validate_conn(self, conn: BaseHTTPConnection) -> None:
        """
        Called right before a request is made, after the socket is created.
        """
        super()._validate_conn(conn)

        # Force connect early to allow us to validate the connection.
        if conn.is_closed:
            conn.connect()

        # TODO revise this, see https://github.com/urllib3/urllib3/issues/2791
        if not conn.is_verified and not conn.proxy_is_verified:
            warnings.warn(
                (
                    f"Unverified HTTPS request is being made to host '{conn.host}'. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
                    "#tls-warnings"
                ),
                InsecureRequestWarning,
            )
1113
+
1114
def connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or "http"
    port = parsed.port or port_by_scheme.get(scheme, 80)
    pool_cls = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_cls(parsed.host, port=port, **kw)  # type: ignore[arg-type]
1141
+
1142
+
1143
@typing.overload
def _normalize_host(host: None, scheme: str | None) -> None:
    ...


@typing.overload
def _normalize_host(host: str, scheme: str | None) -> str:
    ...


def _normalize_host(host: str | None, scheme: str | None) -> str | None:
    """
    Normalize hosts for comparisons and use with sockets.
    """

    host = normalize_host(host, scheme)

    # httplib doubles up square brackets on the Host header when handed a
    # bracketed IPv6 address together with a port, so strip the brackets here.
    # However, for backward compatibility reasons we can't actually *assert*
    # that the port is non-None. See http://bugs.python.org/issue28539
    if host is not None and host[:1] == "[" and host[-1:] == "]":
        return host[1:-1]
    return host
1169
+
1170
+
1171
def _url_from_pool(
    pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None
) -> str:
    """Returns the URL from a given connection pool. This is mainly used for testing and logging."""
    url = Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path)
    return url.url
1176
+
1177
+
1178
+ def _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:
1179
+ """Drains a queue of connections and closes each one."""
1180
+ try:
1181
+ while True:
1182
+ conn = pool.get(block=False)
1183
+ if conn:
1184
+ conn.close()
1185
+ except queue.Empty:
1186
+ pass # Done.
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/__pycache__/socks.cpython-310.pyc ADDED
Binary file (6.17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (727 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc ADDED
Binary file (7.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc ADDED
Binary file (7.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/urllib3/contrib/emscripten/fetch.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Support for streaming http requests in emscripten.
3
+
4
+ A few caveats -
5
+
6
+ Firstly, you can't do streaming http in the main UI thread, because atomics.wait isn't allowed.
7
+ Streaming only works if you're running pyodide in a web worker.
8
+
9
+ Secondly, this uses an extra web worker and SharedArrayBuffer to do the asynchronous fetch
10
+ operation, so it requires that you have crossOriginIsolation enabled, by serving over https
11
+ (or from localhost) with the two headers below set:
12
+
13
+ Cross-Origin-Opener-Policy: same-origin
14
+ Cross-Origin-Embedder-Policy: require-corp
15
+
16
+ You can tell if cross origin isolation is successfully enabled by looking at the global crossOriginIsolated variable in
17
+ javascript console. If it isn't, streaming requests will fall back to XMLHttpRequest, i.e. getting the whole
19
+ request into a buffer and then returning it. It shows a warning in the javascript console in this case.
19
+
20
+ Finally, the webworker which does the streaming fetch is created on initial import, but will only be started once
21
+ control is returned to javascript. Call `await wait_for_streaming_ready()` to wait for streaming fetch.
22
+
23
+ NB: in this code, there are a lot of javascript objects. They are named js_*
24
+ to make it clear what type of object they are.
25
+ """
26
+ from __future__ import annotations
27
+
28
+ import io
29
+ import json
30
+ from email.parser import Parser
31
+ from importlib.resources import files
32
+ from typing import TYPE_CHECKING, Any
33
+
34
+ import js # type: ignore[import-not-found]
35
+ from pyodide.ffi import ( # type: ignore[import-not-found]
36
+ JsArray,
37
+ JsException,
38
+ JsProxy,
39
+ to_js,
40
+ )
41
+
42
+ if TYPE_CHECKING:
43
+ from typing_extensions import Buffer
44
+
45
+ from .request import EmscriptenRequest
46
+ from .response import EmscriptenResponse
47
+
48
+ """
49
+ There are some headers that trigger unintended CORS preflight requests.
50
+ See also https://github.com/koenvo/pyodide-http/issues/22
51
+ """
52
+ HEADERS_TO_IGNORE = ("user-agent",)
53
+
54
+ SUCCESS_HEADER = -1
55
+ SUCCESS_EOF = -2
56
+ ERROR_TIMEOUT = -3
57
+ ERROR_EXCEPTION = -4
58
+
59
+ _STREAMING_WORKER_CODE = (
60
+ files(__package__)
61
+ .joinpath("emscripten_fetch_worker.js")
62
+ .read_text(encoding="utf-8")
63
+ )
64
+
65
+
66
+ class _RequestError(Exception):
67
+ def __init__(
68
+ self,
69
+ message: str | None = None,
70
+ *,
71
+ request: EmscriptenRequest | None = None,
72
+ response: EmscriptenResponse | None = None,
73
+ ):
74
+ self.request = request
75
+ self.response = response
76
+ self.message = message
77
+ super().__init__(self.message)
78
+
79
+
80
class _StreamingError(_RequestError):
    """Raised when the streaming (SharedArrayBuffer worker) path fails."""

    pass
82
+
83
+
84
class _TimeoutError(_RequestError):
    """Raised when a request exceeds its configured timeout."""

    pass
86
+
87
+
88
def _obj_from_dict(dict_val: dict[str, Any]) -> JsProxy:
    # Convert a Python dict to a plain JavaScript object (to_js alone would
    # produce a JS Map; Object.fromEntries gives the {...} shape the worker
    # and DOM APIs expect).
    return to_js(dict_val, dict_converter=js.Object.fromEntries)
90
+
91
+
92
class _ReadStream(io.RawIOBase):
    """Readable raw stream backed by the fetch worker's shared buffers.

    The worker writes a status/length into ``int_buffer`` and the response
    bytes into ``byte_buffer``; :meth:`readinto` synchronizes with it via
    ``Atomics.wait`` on slot 0 of ``int_buffer``.
    """

    def __init__(
        self,
        int_buffer: JsArray,
        byte_buffer: JsArray,
        timeout: float,
        worker: JsProxy,
        connection_id: int,
        request: EmscriptenRequest,
    ):
        self.int_buffer = int_buffer
        self.byte_buffer = byte_buffer
        self.read_pos = 0  # offset of the next unread byte in byte_buffer
        self.read_len = 0  # number of buffered bytes not yet handed out
        self.connection_id = connection_id
        self.worker = worker
        # Atomics.wait takes milliseconds; timeout <= 0 means wait forever.
        self.timeout = int(1000 * timeout) if timeout > 0 else None
        self.is_live = True
        self._is_closed = False
        self.request: EmscriptenRequest | None = request

    def __del__(self) -> None:
        self.close()

    # this is compatible with _base_connection
    def is_closed(self) -> bool:
        return self._is_closed

    # for compatibility with RawIOBase
    @property
    def closed(self) -> bool:
        return self.is_closed()

    def close(self) -> None:
        """Drop the buffers and tell the worker to close this connection."""
        if not self.is_closed():
            self.read_len = 0
            self.read_pos = 0
            self.int_buffer = None
            self.byte_buffer = None
            self._is_closed = True
            self.request = None
            if self.is_live:
                # Let the worker cancel/release the underlying fetch.
                self.worker.postMessage(_obj_from_dict({"close": self.connection_id}))
                self.is_live = False
            super().close()

    def readable(self) -> bool:
        return True

    def writable(self) -> bool:
        return False

    def seekable(self) -> bool:
        return False

    def readinto(self, byte_obj: Buffer) -> int:
        """Fill *byte_obj* with response bytes; return the count (0 at EOF).

        Blocks via ``Atomics.wait`` until the worker delivers more data.
        Raises :class:`_TimeoutError` on timeout and :class:`_StreamingError`
        when the worker reports a fetch exception.
        """
        if not self.int_buffer:
            raise _StreamingError(
                "No buffer for stream in _ReadStream.readinto",
                request=self.request,
                response=None,
            )
        if self.read_len == 0:
            # wait for the worker to send something
            js.Atomics.store(self.int_buffer, 0, ERROR_TIMEOUT)
            self.worker.postMessage(_obj_from_dict({"getMore": self.connection_id}))
            if (
                js.Atomics.wait(self.int_buffer, 0, ERROR_TIMEOUT, self.timeout)
                == "timed-out"
            ):
                raise _TimeoutError
            data_len = self.int_buffer[0]
            if data_len > 0:
                # Positive value = number of fresh bytes in byte_buffer.
                self.read_len = data_len
                self.read_pos = 0
            elif data_len == ERROR_EXCEPTION:
                string_len = self.int_buffer[1]
                # decode the error string
                js_decoder = js.TextDecoder.new()
                json_str = js_decoder.decode(self.byte_buffer.slice(0, string_len))
                raise _StreamingError(
                    f"Exception thrown in fetch: {json_str}",
                    request=self.request,
                    response=None,
                )
            else:
                # EOF, free the buffers and return zero
                # and free the request
                self.is_live = False
                self.close()
                return 0
        # copy from int32array to python bytes
        ret_length = min(self.read_len, len(memoryview(byte_obj)))
        subarray = self.byte_buffer.subarray(
            self.read_pos, self.read_pos + ret_length
        ).to_py()
        memoryview(byte_obj)[0:ret_length] = subarray
        self.read_len -= ret_length
        self.read_pos += ret_length
        return ret_length
192
+
193
+
194
class _StreamingFetcher:
    """Runs fetches on a dedicated web worker and streams results back
    through a SharedArrayBuffer so the synchronous caller can block on them.
    """

    def __init__(self) -> None:
        # make web-worker and data buffer on startup
        self.streaming_ready = False

        # The worker's JS source is wrapped in a Blob URL so no extra file
        # needs to be served alongside the page.
        js_data_blob = js.Blob.new(
            [_STREAMING_WORKER_CODE], _obj_from_dict({"type": "application/javascript"})
        )

        def promise_resolver(js_resolve_fn: JsProxy, js_reject_fn: JsProxy) -> None:
            # First message from the worker means it finished initializing.
            def onMsg(e: JsProxy) -> None:
                self.streaming_ready = True
                js_resolve_fn(e)

            def onErr(e: JsProxy) -> None:
                js_reject_fn(e)  # Defensive: never happens in ci

            self.js_worker.onmessage = onMsg
            self.js_worker.onerror = onErr

        js_data_url = js.URL.createObjectURL(js_data_blob)
        self.js_worker = js.globalThis.Worker.new(js_data_url)
        # Resolved once the worker reports ready; awaited by
        # wait_for_streaming_ready().
        self.js_worker_ready_promise = js.globalThis.Promise.new(promise_resolver)

    def send(self, request: EmscriptenRequest) -> EmscriptenResponse:
        """Post *request* to the worker and block until headers arrive.

        Raises :class:`_TimeoutError` on timeout and :class:`_StreamingError`
        when the worker reports a fetch exception or an unknown status.
        """
        headers = {
            k: v for k, v in request.headers.items() if k not in HEADERS_TO_IGNORE
        }

        body = request.body
        fetch_data = {"headers": headers, "body": to_js(body), "method": request.method}
        # start the request off in the worker
        timeout = int(1000 * request.timeout) if request.timeout > 0 else None
        js_shared_buffer = js.SharedArrayBuffer.new(1048576)
        js_int_buffer = js.Int32Array.new(js_shared_buffer)
        # Byte view over the same memory, offset past the two int32 slots.
        js_byte_buffer = js.Uint8Array.new(js_shared_buffer, 8)

        js.Atomics.store(js_int_buffer, 0, ERROR_TIMEOUT)
        js.Atomics.notify(js_int_buffer, 0)
        # Resolve relative URLs against the page location for the worker.
        js_absolute_url = js.URL.new(request.url, js.location).href
        self.js_worker.postMessage(
            _obj_from_dict(
                {
                    "buffer": js_shared_buffer,
                    "url": js_absolute_url,
                    "fetchParams": fetch_data,
                }
            )
        )
        # wait for the worker to send something
        js.Atomics.wait(js_int_buffer, 0, ERROR_TIMEOUT, timeout)
        if js_int_buffer[0] == ERROR_TIMEOUT:
            raise _TimeoutError(
                "Timeout connecting to streaming request",
                request=request,
                response=None,
            )
        elif js_int_buffer[0] == SUCCESS_HEADER:
            # got response
            # header length is in second int of intBuffer
            string_len = js_int_buffer[1]
            # decode the rest to a JSON string
            js_decoder = js.TextDecoder.new()
            # this does a copy (the slice) because decode can't work on shared array
            # for some silly reason
            json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
            # get it as an object
            response_obj = json.loads(json_str)
            return EmscriptenResponse(
                request=request,
                status_code=response_obj["status"],
                headers=response_obj["headers"],
                body=_ReadStream(
                    js_int_buffer,
                    js_byte_buffer,
                    request.timeout,
                    self.js_worker,
                    response_obj["connectionID"],
                    request,
                ),
            )
        elif js_int_buffer[0] == ERROR_EXCEPTION:
            string_len = js_int_buffer[1]
            # decode the error string
            js_decoder = js.TextDecoder.new()
            json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
            raise _StreamingError(
                f"Exception thrown in fetch: {json_str}", request=request, response=None
            )
        else:
            raise _StreamingError(
                f"Unknown status from worker in fetch: {js_int_buffer[0]}",
                request=request,
                response=None,
            )
289
+
290
+
291
+ # check if we are in a worker or not
292
def is_in_browser_main_thread() -> bool:
    """True when running on the browser's main (UI) thread, not a worker."""
    if not hasattr(js, "window") or not hasattr(js, "self"):
        return False
    return js.self == js.window
294
+
295
+
296
def is_cross_origin_isolated() -> bool:
    """True when the page is cross-origin isolated (SharedArrayBuffer usable)."""
    if hasattr(js, "crossOriginIsolated"):
        return js.crossOriginIsolated
    return False
298
+
299
+
300
def is_in_node() -> bool:
    """True when running under Node.js rather than a browser."""
    process = getattr(js, "process", None)
    release = getattr(process, "release", None)
    return getattr(release, "name", None) == "node"
307
+
308
+
309
def is_worker_available() -> bool:
    """True when the Worker and Blob constructors exist in this JS environment."""
    return all(hasattr(js, name) for name in ("Worker", "Blob"))
311
+
312
+
313
# The module-level streaming fetcher, created eagerly at import time but only
# in environments where streaming can actually work: a web worker (not the
# browser main thread) with cross-origin isolation, and not under Node.
_fetcher: _StreamingFetcher | None = None

if is_worker_available() and (
    (is_cross_origin_isolated() and not is_in_browser_main_thread())
    and (not is_in_node())
):
    _fetcher = _StreamingFetcher()
else:
    _fetcher = None
322
+
323
+
324
def send_streaming_request(request: EmscriptenRequest) -> EmscriptenResponse | None:
    """Send *request* via the streaming worker.

    Returns None (after logging a console warning) when streaming is
    unavailable, so the caller can fall back to the XMLHttpRequest path.
    """
    if _fetcher is None or not streaming_ready():
        _show_streaming_warning()
        return None
    return _fetcher.send(request)
330
+
331
+
332
_SHOWN_TIMEOUT_WARNING = False


def _show_timeout_warning() -> None:
    """Warn on the JS console, once, that main-thread timeouts are ignored."""
    global _SHOWN_TIMEOUT_WARNING
    if _SHOWN_TIMEOUT_WARNING:
        return
    _SHOWN_TIMEOUT_WARNING = True
    js.console.warn("Warning: Timeout is not available on main browser thread")
341
+
342
+
343
_SHOWN_STREAMING_WARNING = False


def _show_streaming_warning() -> None:
    """Explain on the JS console, once, why streaming fetch is unavailable."""
    global _SHOWN_STREAMING_WARNING
    if _SHOWN_STREAMING_WARNING:
        return
    _SHOWN_STREAMING_WARNING = True

    parts = ["Can't stream HTTP requests because: \n"]
    if not is_cross_origin_isolated():
        parts.append(" Page is not cross-origin isolated\n")
    if is_in_browser_main_thread():
        parts.append(" Python is running in main browser thread\n")
    if not is_worker_available():
        parts.append(
            " Worker or Blob classes are not available in this environment."
        )  # Defensive: this is always False in browsers that we test in
    if streaming_ready() is False:
        parts.append(
            """ Streaming fetch worker isn't ready. If you want to be sure that streaming fetch
is working, you need to call: 'await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready()`"""
        )
    from js import console

    console.warn("".join(parts))
363
+
364
+
365
def send_request(request: EmscriptenRequest) -> EmscriptenResponse:
    """Perform *request* synchronously via XMLHttpRequest.

    This is the non-streaming fallback: the entire response body is buffered
    before returning. Raises :class:`_TimeoutError` for JS "TimeoutError" and
    :class:`_RequestError` for any other JS exception.
    """
    try:
        js_xhr = js.XMLHttpRequest.new()

        if not is_in_browser_main_thread():
            # In a worker we can get raw bytes and use a real timeout.
            js_xhr.responseType = "arraybuffer"
            if request.timeout:
                js_xhr.timeout = int(request.timeout * 1000)
        else:
            # NOTE(review): on the main thread the response is read as
            # latin-1-ish text and re-encoded to bytes below — presumably
            # because sync main-thread XHR can't use responseType; confirm.
            js_xhr.overrideMimeType("text/plain; charset=ISO-8859-15")
            if request.timeout:
                # timeout isn't available on the main thread - show a warning in console
                # if it is set
                _show_timeout_warning()

        # Third argument False = synchronous request.
        js_xhr.open(request.method, request.url, False)
        for name, value in request.headers.items():
            if name.lower() not in HEADERS_TO_IGNORE:
                js_xhr.setRequestHeader(name, value)

        js_xhr.send(to_js(request.body))

        # getAllResponseHeaders() returns RFC-822-style lines, which the
        # stdlib email parser turns into a mapping.
        headers = dict(Parser().parsestr(js_xhr.getAllResponseHeaders()))

        if not is_in_browser_main_thread():
            body = js_xhr.response.to_py().tobytes()
        else:
            body = js_xhr.response.encode("ISO-8859-15")
        return EmscriptenResponse(
            status_code=js_xhr.status, headers=headers, body=body, request=request
        )
    except JsException as err:
        if err.name == "TimeoutError":
            raise _TimeoutError(err.message, request=request)
        elif err.name == "NetworkError":
            raise _RequestError(err.message, request=request)
        else:
            # general http error
            raise _RequestError(err.message, request=request)
404
+
405
+
406
def streaming_ready() -> bool | None:
    """Whether the streaming worker is ready; None when there is no fetcher."""
    if _fetcher is None:
        return None  # no fetcher, return None to signify that
    return _fetcher.streaming_ready
411
+
412
+
413
async def wait_for_streaming_ready() -> bool:
    """Await worker startup; True when streaming is ready, False if absent."""
    if _fetcher is None:
        return False
    await _fetcher.js_worker_ready_promise
    return True