applied-ai-018 committed
Commit 7c06153 · verified · Parent: 5bc16d0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/__pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/__pycache__/sqlitedict.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/numexpr/__pycache__/necompiler.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so +3 -0
  10. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/adapters.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/models.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/requests/__version__.py +14 -0
  21. env-llmeval/lib/python3.10/site-packages/requests/_internal_utils.py +50 -0
  22. env-llmeval/lib/python3.10/site-packages/requests/api.py +157 -0
  23. env-llmeval/lib/python3.10/site-packages/requests/auth.py +315 -0
  24. env-llmeval/lib/python3.10/site-packages/requests/certs.py +17 -0
  25. env-llmeval/lib/python3.10/site-packages/requests/compat.py +79 -0
  26. env-llmeval/lib/python3.10/site-packages/requests/exceptions.py +141 -0
  27. env-llmeval/lib/python3.10/site-packages/requests/help.py +134 -0
  28. env-llmeval/lib/python3.10/site-packages/requests/hooks.py +33 -0
  29. env-llmeval/lib/python3.10/site-packages/requests/packages.py +28 -0
  30. env-llmeval/lib/python3.10/site-packages/requests/sessions.py +833 -0
  31. env-llmeval/lib/python3.10/site-packages/requests/status_codes.py +128 -0
  32. env-llmeval/lib/python3.10/site-packages/requests/utils.py +1094 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__init__.py +76 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/configuration_dpt.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dinov2_depth_to_hf.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_beit_to_hf.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_hybrid_to_pytorch.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_swinv2_to_hf.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_to_pytorch.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/feature_extraction_dpt.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/image_processing_dpt.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/modeling_dpt.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/configuration_dpt.py +284 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_beit_to_hf.py +306 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py +316 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_swinv2_to_hf.py +322 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_to_pytorch.py +286 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/feature_extraction_dpt.py +33 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/image_processing_dpt.py +484 -0
.gitattributes CHANGED
@@ -218,3 +218,4 @@ env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/__pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc ADDED
Binary file (16.7 kB)
 
env-llmeval/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc ADDED
Binary file (9.42 kB)
 
env-llmeval/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc ADDED
Binary file (27.6 kB)
 
env-llmeval/lib/python3.10/site-packages/__pycache__/sqlitedict.cpython-310.pyc ADDED
Binary file (18.8 kB)
 
env-llmeval/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc ADDED
Binary file (42.8 kB)
 
env-llmeval/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc ADDED
Binary file (93.6 kB)
 
env-llmeval/lib/python3.10/site-packages/numexpr/__pycache__/necompiler.cpython-310.pyc ADDED
Binary file (30.6 kB)
 
env-llmeval/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcb0c6481fb539a0d205a98b8ad9860f3226a3b80ec3dd81200970415199cab5
+ size 1150680
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.87 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc ADDED
Binary file (1.61 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/adapters.cpython-310.pyc ADDED
Binary file (16.1 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/auth.cpython-310.pyc ADDED
Binary file (8.1 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (975 Bytes)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/models.cpython-310.pyc ADDED
Binary file (24.2 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/sessions.cpython-310.pyc ADDED
Binary file (19.6 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc ADDED
Binary file (4.65 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/structures.cpython-310.pyc ADDED
Binary file (4.42 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__pycache__/utils.cpython-310.pyc ADDED
Binary file (24.4 kB)
 
env-llmeval/lib/python3.10/site-packages/requests/__version__.py ADDED
@@ -0,0 +1,14 @@
+ # .-. .-. .-. . . .-. .-. .-. .-.
+ # |( |- |.| | | |- `-. | `-.
+ # ' ' `-' `-`.`-' `-' `-' ' `-'
+
+ __title__ = "requests"
+ __description__ = "Python HTTP for Humans."
+ __url__ = "https://requests.readthedocs.io"
+ __version__ = "2.31.0"
+ __build__ = 0x023100
+ __author__ = "Kenneth Reitz"
+ __author_email__ = "[email protected]"
+ __license__ = "Apache 2.0"
+ __copyright__ = "Copyright Kenneth Reitz"
+ __cake__ = "\u2728 \U0001f370 \u2728"
env-llmeval/lib/python3.10/site-packages/requests/_internal_utils.py ADDED
@@ -0,0 +1,50 @@
+ """
+ requests._internal_utils
+ ~~~~~~~~~~~~~~
+
+ Provides utility functions that are consumed internally by Requests
+ which depend on extremely few external helpers (such as compat)
+ """
+ import re
+
+ from .compat import builtin_str
+
+ _VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
+ _VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
+ _VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
+ _VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
+
+ _HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR)
+ _HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE)
+ HEADER_VALIDATORS = {
+     bytes: _HEADER_VALIDATORS_BYTE,
+     str: _HEADER_VALIDATORS_STR,
+ }
+
+
+ def to_native_string(string, encoding="ascii"):
+     """Given a string object, regardless of type, returns a representation of
+     that string in the native string type, encoding and decoding where
+     necessary. This assumes ASCII unless told otherwise.
+     """
+     if isinstance(string, builtin_str):
+         out = string
+     else:
+         out = string.decode(encoding)
+
+     return out
+
+
+ def unicode_is_ascii(u_string):
+     """Determine if unicode string only contains ASCII characters.
+
+     :param str u_string: unicode string to check. Must be unicode
+         and not Python 2 `str`.
+     :rtype: bool
+     """
+     assert isinstance(u_string, str)
+     try:
+         u_string.encode("ascii")
+         return True
+     except UnicodeEncodeError:
+         return False
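For quick reference, a small sketch (not part of the commit) of how the two helpers above behave; the sample strings are made up:

import requests  # ensure the package is importable before reaching into internals
from requests._internal_utils import to_native_string, unicode_is_ascii

# bytes are decoded to the native str type; str passes through unchanged
assert to_native_string(b"hello") == "hello"
assert to_native_string("hello") == "hello"

# unicode_is_ascii reports whether a str encodes as pure ASCII
assert unicode_is_ascii("plain ascii") is True
assert unicode_is_ascii("caf\u00e9") is False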
env-llmeval/lib/python3.10/site-packages/requests/api.py ADDED
@@ -0,0 +1,157 @@
+ """
+ requests.api
+ ~~~~~~~~~~~~
+
+ This module implements the Requests API.
+
+ :copyright: (c) 2012 by Kenneth Reitz.
+ :license: Apache2, see LICENSE for more details.
+ """
+
+ from . import sessions
+
+
+ def request(method, url, **kwargs):
+     """Constructs and sends a :class:`Request <Request>`.
+
+     :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
+     :param url: URL for the new :class:`Request` object.
+     :param params: (optional) Dictionary, list of tuples or bytes to send
+         in the query string for the :class:`Request`.
+     :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+         object to send in the body of the :class:`Request`.
+     :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+     :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
+     :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
+     :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
+         ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
+         or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
+         defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
+         to add for the file.
+     :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
+     :param timeout: (optional) How many seconds to wait for the server to send data
+         before giving up, as a float, or a :ref:`(connect timeout, read
+         timeout) <timeouts>` tuple.
+     :type timeout: float or tuple
+     :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
+     :type allow_redirects: bool
+     :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
+     :param verify: (optional) Either a boolean, in which case it controls whether we verify
+         the server's TLS certificate, or a string, in which case it must be a path
+         to a CA bundle to use. Defaults to ``True``.
+     :param stream: (optional) if ``False``, the response content will be immediately downloaded.
+     :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+
+     Usage::
+
+       >>> import requests
+       >>> req = requests.request('GET', 'https://httpbin.org/get')
+       >>> req
+       <Response [200]>
+     """
+
+     # By using the 'with' statement we are sure the session is closed, thus we
+     # avoid leaving sockets open which can trigger a ResourceWarning in some
+     # cases, and look like a memory leak in others.
+     with sessions.Session() as session:
+         return session.request(method=method, url=url, **kwargs)
+
+
+ def get(url, params=None, **kwargs):
+     r"""Sends a GET request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param params: (optional) Dictionary, list of tuples or bytes to send
+         in the query string for the :class:`Request`.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("get", url, params=params, **kwargs)
+
+
+ def options(url, **kwargs):
+     r"""Sends an OPTIONS request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("options", url, **kwargs)
+
+
+ def head(url, **kwargs):
+     r"""Sends a HEAD request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param \*\*kwargs: Optional arguments that ``request`` takes. If
+         `allow_redirects` is not provided, it will be set to `False` (as
+         opposed to the default :meth:`request` behavior).
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     kwargs.setdefault("allow_redirects", False)
+     return request("head", url, **kwargs)
+
+
+ def post(url, data=None, json=None, **kwargs):
+     r"""Sends a POST request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+         object to send in the body of the :class:`Request`.
+     :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("post", url, data=data, json=json, **kwargs)
+
+
+ def put(url, data=None, **kwargs):
+     r"""Sends a PUT request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+         object to send in the body of the :class:`Request`.
+     :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("put", url, data=data, **kwargs)
+
+
+ def patch(url, data=None, **kwargs):
+     r"""Sends a PATCH request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+         object to send in the body of the :class:`Request`.
+     :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("patch", url, data=data, **kwargs)
+
+
+ def delete(url, **kwargs):
+     r"""Sends a DELETE request.
+
+     :param url: URL for the new :class:`Request` object.
+     :param \*\*kwargs: Optional arguments that ``request`` takes.
+     :return: :class:`Response <Response>` object
+     :rtype: requests.Response
+     """
+
+     return request("delete", url, **kwargs)
env-llmeval/lib/python3.10/site-packages/requests/auth.py ADDED
@@ -0,0 +1,315 @@
+ """
+ requests.auth
+ ~~~~~~~~~~~~~
+
+ This module contains the authentication handlers for Requests.
+ """
+
+ import hashlib
+ import os
+ import re
+ import threading
+ import time
+ import warnings
+ from base64 import b64encode
+
+ from ._internal_utils import to_native_string
+ from .compat import basestring, str, urlparse
+ from .cookies import extract_cookies_to_jar
+ from .utils import parse_dict_header
+
+ CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
+ CONTENT_TYPE_MULTI_PART = "multipart/form-data"
+
+
+ def _basic_auth_str(username, password):
+     """Returns a Basic Auth string."""
+
+     # "I want us to put a big-ol' comment on top of it that
+     # says that this behaviour is dumb but we need to preserve
+     # it because people are relying on it."
+     # - Lukasa
+     #
+     # These are here solely to maintain backwards compatibility
+     # for things like ints. This will be removed in 3.0.0.
+     if not isinstance(username, basestring):
+         warnings.warn(
+             "Non-string usernames will no longer be supported in Requests "
+             "3.0.0. Please convert the object you've passed in ({!r}) to "
+             "a string or bytes object in the near future to avoid "
+             "problems.".format(username),
+             category=DeprecationWarning,
+         )
+         username = str(username)
+
+     if not isinstance(password, basestring):
+         warnings.warn(
+             "Non-string passwords will no longer be supported in Requests "
+             "3.0.0. Please convert the object you've passed in ({!r}) to "
+             "a string or bytes object in the near future to avoid "
+             "problems.".format(type(password)),
+             category=DeprecationWarning,
+         )
+         password = str(password)
+     # -- End Removal --
+
+     if isinstance(username, str):
+         username = username.encode("latin1")
+
+     if isinstance(password, str):
+         password = password.encode("latin1")
+
+     authstr = "Basic " + to_native_string(
+         b64encode(b":".join((username, password))).strip()
+     )
+
+     return authstr
+
+
+ class AuthBase:
+     """Base class that all auth implementations derive from"""
+
+     def __call__(self, r):
+         raise NotImplementedError("Auth hooks must be callable.")
+
+
+ class HTTPBasicAuth(AuthBase):
+     """Attaches HTTP Basic Authentication to the given Request object."""
+
+     def __init__(self, username, password):
+         self.username = username
+         self.password = password
+
+     def __eq__(self, other):
+         return all(
+             [
+                 self.username == getattr(other, "username", None),
+                 self.password == getattr(other, "password", None),
+             ]
+         )
+
+     def __ne__(self, other):
+         return not self == other
+
+     def __call__(self, r):
+         r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
+         return r
+
+
+ class HTTPProxyAuth(HTTPBasicAuth):
+     """Attaches HTTP Proxy Authentication to a given Request object."""
+
+     def __call__(self, r):
+         r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
+         return r
+
+
+ class HTTPDigestAuth(AuthBase):
+     """Attaches HTTP Digest Authentication to the given Request object."""
+
+     def __init__(self, username, password):
+         self.username = username
+         self.password = password
+         # Keep state in per-thread local storage
+         self._thread_local = threading.local()
+
+     def init_per_thread_state(self):
+         # Ensure state is initialized just once per-thread
+         if not hasattr(self._thread_local, "init"):
+             self._thread_local.init = True
+             self._thread_local.last_nonce = ""
+             self._thread_local.nonce_count = 0
+             self._thread_local.chal = {}
+             self._thread_local.pos = None
+             self._thread_local.num_401_calls = None
+
+     def build_digest_header(self, method, url):
+         """
+         :rtype: str
+         """
+
+         realm = self._thread_local.chal["realm"]
+         nonce = self._thread_local.chal["nonce"]
+         qop = self._thread_local.chal.get("qop")
+         algorithm = self._thread_local.chal.get("algorithm")
+         opaque = self._thread_local.chal.get("opaque")
+         hash_utf8 = None
+
+         if algorithm is None:
+             _algorithm = "MD5"
+         else:
+             _algorithm = algorithm.upper()
+         # lambdas assume digest modules are imported at the top level
+         if _algorithm == "MD5" or _algorithm == "MD5-SESS":
+
+             def md5_utf8(x):
+                 if isinstance(x, str):
+                     x = x.encode("utf-8")
+                 return hashlib.md5(x).hexdigest()
+
+             hash_utf8 = md5_utf8
+         elif _algorithm == "SHA":
+
+             def sha_utf8(x):
+                 if isinstance(x, str):
+                     x = x.encode("utf-8")
+                 return hashlib.sha1(x).hexdigest()
+
+             hash_utf8 = sha_utf8
+         elif _algorithm == "SHA-256":
+
+             def sha256_utf8(x):
+                 if isinstance(x, str):
+                     x = x.encode("utf-8")
+                 return hashlib.sha256(x).hexdigest()
+
+             hash_utf8 = sha256_utf8
+         elif _algorithm == "SHA-512":
+
+             def sha512_utf8(x):
+                 if isinstance(x, str):
+                     x = x.encode("utf-8")
+                 return hashlib.sha512(x).hexdigest()
+
+             hash_utf8 = sha512_utf8
+
+         KD = lambda s, d: hash_utf8(f"{s}:{d}")  # noqa:E731
+
+         if hash_utf8 is None:
+             return None
+
+         # XXX not implemented yet
+         entdig = None
+         p_parsed = urlparse(url)
+         #: path is request-uri defined in RFC 2616 which should not be empty
+         path = p_parsed.path or "/"
+         if p_parsed.query:
+             path += f"?{p_parsed.query}"
+
+         A1 = f"{self.username}:{realm}:{self.password}"
+         A2 = f"{method}:{path}"
+
+         HA1 = hash_utf8(A1)
+         HA2 = hash_utf8(A2)
+
+         if nonce == self._thread_local.last_nonce:
+             self._thread_local.nonce_count += 1
+         else:
+             self._thread_local.nonce_count = 1
+         ncvalue = f"{self._thread_local.nonce_count:08x}"
+         s = str(self._thread_local.nonce_count).encode("utf-8")
+         s += nonce.encode("utf-8")
+         s += time.ctime().encode("utf-8")
+         s += os.urandom(8)
+
+         cnonce = hashlib.sha1(s).hexdigest()[:16]
+         if _algorithm == "MD5-SESS":
+             HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
+
+         if not qop:
+             respdig = KD(HA1, f"{nonce}:{HA2}")
+         elif qop == "auth" or "auth" in qop.split(","):
+             noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
+             respdig = KD(HA1, noncebit)
+         else:
+             # XXX handle auth-int.
+             return None
+
+         self._thread_local.last_nonce = nonce
+
+         # XXX should the partial digests be encoded too?
+         base = (
+             f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
+             f'uri="{path}", response="{respdig}"'
+         )
+         if opaque:
+             base += f', opaque="{opaque}"'
+         if algorithm:
+             base += f', algorithm="{algorithm}"'
+         if entdig:
+             base += f', digest="{entdig}"'
+         if qop:
+             base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
+
+         return f"Digest {base}"
+
+     def handle_redirect(self, r, **kwargs):
+         """Reset num_401_calls counter on redirects."""
+         if r.is_redirect:
+             self._thread_local.num_401_calls = 1
+
+     def handle_401(self, r, **kwargs):
+         """
+         Takes the given response and tries digest-auth, if needed.
+
+         :rtype: requests.Response
+         """
+
+         # If response is not 4xx, do not auth
+         # See https://github.com/psf/requests/issues/3772
+         if not 400 <= r.status_code < 500:
+             self._thread_local.num_401_calls = 1
+             return r
+
+         if self._thread_local.pos is not None:
+             # Rewind the file position indicator of the body to where
+             # it was to resend the request.
+             r.request.body.seek(self._thread_local.pos)
+         s_auth = r.headers.get("www-authenticate", "")
+
+         if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
+
+             self._thread_local.num_401_calls += 1
+             pat = re.compile(r"digest ", flags=re.IGNORECASE)
+             self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
+
+             # Consume content and release the original connection
+             # to allow our new request to reuse the same one.
+             r.content
+             r.close()
+             prep = r.request.copy()
+             extract_cookies_to_jar(prep._cookies, r.request, r.raw)
+             prep.prepare_cookies(prep._cookies)
+
+             prep.headers["Authorization"] = self.build_digest_header(
+                 prep.method, prep.url
+             )
+             _r = r.connection.send(prep, **kwargs)
+             _r.history.append(r)
+             _r.request = prep
+
+             return _r
+
+         self._thread_local.num_401_calls = 1
+         return r
+
+     def __call__(self, r):
+         # Initialize per-thread state, if needed
+         self.init_per_thread_state()
+         # If we have a saved nonce, skip the 401
+         if self._thread_local.last_nonce:
+             r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
+         try:
+             self._thread_local.pos = r.body.tell()
+         except AttributeError:
+             # In the case of HTTPDigestAuth being reused and the body of
+             # the previous request was a file-like object, pos has the
+             # file position of the previous body. Ensure it's set to
+             # None.
+             self._thread_local.pos = None
+         r.register_hook("response", self.handle_401)
+         r.register_hook("response", self.handle_redirect)
+         self._thread_local.num_401_calls = 1
+
+         return r
+
+     def __eq__(self, other):
+         return all(
+             [
+                 self.username == getattr(other, "username", None),
+                 self.password == getattr(other, "password", None),
+             ]
+         )
+
+     def __ne__(self, other):
+         return not self == other
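A short illustrative sketch (not from the commit) of attaching the auth handlers defined above; the httpbin.org endpoints are used purely for demonstration:

import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

# Basic auth: credentials are base64-encoded into the Authorization header
resp = requests.get("https://httpbin.org/basic-auth/user/pass",
                    auth=HTTPBasicAuth("user", "pass"))
print(resp.status_code)

# Digest auth: the 401 challenge is consumed by the handle_401 response hook,
# then the request is retried with a computed Digest header
resp = requests.get("https://httpbin.org/digest-auth/auth/user/pass",
                    auth=HTTPDigestAuth("user", "pass"))
print(resp.status_code)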
env-llmeval/lib/python3.10/site-packages/requests/certs.py ADDED
@@ -0,0 +1,17 @@
+ #!/usr/bin/env python
+
+ """
+ requests.certs
+ ~~~~~~~~~~~~~~
+
+ This module returns the preferred default CA certificate bundle. There is
+ only one — the one from the certifi package.
+
+ If you are packaging Requests, e.g., for a Linux distribution or a managed
+ environment, you can change the definition of where() to return a separately
+ packaged CA bundle.
+ """
+ from certifi import where
+
+ if __name__ == "__main__":
+     print(where())
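For context, importing the module above is equivalent to asking certifi for the path of its bundled CA file, which Requests uses for TLS verification:

from requests import certs

# prints the filesystem path of certifi's cacert.pem in this environment
print(certs.where())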
env-llmeval/lib/python3.10/site-packages/requests/compat.py ADDED
@@ -0,0 +1,79 @@
+ """
+ requests.compat
+ ~~~~~~~~~~~~~~~
+
+ This module previously handled import compatibility issues
+ between Python 2 and Python 3. It remains for backwards
+ compatibility until the next major version.
+ """
+
+ try:
+     import chardet
+ except ImportError:
+     import charset_normalizer as chardet
+
+ import sys
+
+ # -------
+ # Pythons
+ # -------
+
+ # Syntax sugar.
+ _ver = sys.version_info
+
+ #: Python 2.x?
+ is_py2 = _ver[0] == 2
+
+ #: Python 3.x?
+ is_py3 = _ver[0] == 3
+
+ # json/simplejson module import resolution
+ has_simplejson = False
+ try:
+     import simplejson as json
+
+     has_simplejson = True
+ except ImportError:
+     import json
+
+ if has_simplejson:
+     from simplejson import JSONDecodeError
+ else:
+     from json import JSONDecodeError
+
+ # Keep OrderedDict for backwards compatibility.
+ from collections import OrderedDict
+ from collections.abc import Callable, Mapping, MutableMapping
+ from http import cookiejar as cookielib
+ from http.cookies import Morsel
+ from io import StringIO
+
+ # --------------
+ # Legacy Imports
+ # --------------
+ from urllib.parse import (
+     quote,
+     quote_plus,
+     unquote,
+     unquote_plus,
+     urldefrag,
+     urlencode,
+     urljoin,
+     urlparse,
+     urlsplit,
+     urlunparse,
+ )
+ from urllib.request import (
+     getproxies,
+     getproxies_environment,
+     parse_http_list,
+     proxy_bypass,
+     proxy_bypass_environment,
+ )
+
+ builtin_str = str
+ str = str
+ bytes = bytes
+ basestring = (str, bytes)
+ numeric_types = (int, float)
+ integer_types = (int,)
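A small sketch of how downstream code typically consumes this shim, so the json/simplejson and chardet/charset_normalizer differences stay hidden; the invalid JSON string is an illustrative input:

# import json and JSONDecodeError from the same shim so they always match,
# whether or not simplejson is installed
from requests.compat import JSONDecodeError, json, urljoin

print(urljoin("https://example.com/a/", "b"))  # https://example.com/a/b
try:
    json.loads("{not valid json}")
except JSONDecodeError as e:
    print("decode failed:", e)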
env-llmeval/lib/python3.10/site-packages/requests/exceptions.py ADDED
@@ -0,0 +1,141 @@
+ """
+ requests.exceptions
+ ~~~~~~~~~~~~~~~~~~~
+
+ This module contains the set of Requests' exceptions.
+ """
+ from urllib3.exceptions import HTTPError as BaseHTTPError
+
+ from .compat import JSONDecodeError as CompatJSONDecodeError
+
+
+ class RequestException(IOError):
+     """There was an ambiguous exception that occurred while handling your
+     request.
+     """
+
+     def __init__(self, *args, **kwargs):
+         """Initialize RequestException with `request` and `response` objects."""
+         response = kwargs.pop("response", None)
+         self.response = response
+         self.request = kwargs.pop("request", None)
+         if response is not None and not self.request and hasattr(response, "request"):
+             self.request = self.response.request
+         super().__init__(*args, **kwargs)
+
+
+ class InvalidJSONError(RequestException):
+     """A JSON error occurred."""
+
+
+ class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
+     """Couldn't decode the text into json"""
+
+     def __init__(self, *args, **kwargs):
+         """
+         Construct the JSONDecodeError instance first with all
+         args. Then use its args to construct the IOError so that
+         the json specific args aren't used as IOError specific args
+         and the error message from JSONDecodeError is preserved.
+         """
+         CompatJSONDecodeError.__init__(self, *args)
+         InvalidJSONError.__init__(self, *self.args, **kwargs)
+
+
+ class HTTPError(RequestException):
+     """An HTTP error occurred."""
+
+
+ class ConnectionError(RequestException):
+     """A Connection error occurred."""
+
+
+ class ProxyError(ConnectionError):
+     """A proxy error occurred."""
+
+
+ class SSLError(ConnectionError):
+     """An SSL error occurred."""
+
+
+ class Timeout(RequestException):
+     """The request timed out.
+
+     Catching this error will catch both
+     :exc:`~requests.exceptions.ConnectTimeout` and
+     :exc:`~requests.exceptions.ReadTimeout` errors.
+     """
+
+
+ class ConnectTimeout(ConnectionError, Timeout):
+     """The request timed out while trying to connect to the remote server.
+
+     Requests that produced this error are safe to retry.
+     """
+
+
+ class ReadTimeout(Timeout):
+     """The server did not send any data in the allotted amount of time."""
+
+
+ class URLRequired(RequestException):
+     """A valid URL is required to make a request."""
+
+
+ class TooManyRedirects(RequestException):
+     """Too many redirects."""
+
+
+ class MissingSchema(RequestException, ValueError):
+     """The URL scheme (e.g. http or https) is missing."""
+
+
+ class InvalidSchema(RequestException, ValueError):
+     """The URL scheme provided is either invalid or unsupported."""
+
+
+ class InvalidURL(RequestException, ValueError):
+     """The URL provided was somehow invalid."""
+
+
+ class InvalidHeader(RequestException, ValueError):
+     """The header value provided was somehow invalid."""
+
+
+ class InvalidProxyURL(InvalidURL):
+     """The proxy URL provided is invalid."""
+
+
+ class ChunkedEncodingError(RequestException):
+     """The server declared chunked encoding but sent an invalid chunk."""
+
+
+ class ContentDecodingError(RequestException, BaseHTTPError):
+     """Failed to decode response content."""
+
+
+ class StreamConsumedError(RequestException, TypeError):
+     """The content for this response was already consumed."""
+
+
+ class RetryError(RequestException):
+     """Custom retries logic failed"""
+
+
+ class UnrewindableBodyError(RequestException):
+     """Requests encountered an error when trying to rewind a body."""
+
+
+ # Warnings
+
+
+ class RequestsWarning(Warning):
+     """Base warning for Requests."""
+
+
+ class FileModeWarning(RequestsWarning, DeprecationWarning):
+     """A file was opened in text mode, but Requests determined its binary length."""
+
+
+ class RequestsDependencyWarning(RequestsWarning):
+     """An imported dependency doesn't match the expected version range."""
env-llmeval/lib/python3.10/site-packages/requests/help.py ADDED
@@ -0,0 +1,134 @@
+ """Module containing bug report helper(s)."""
+
+ import json
+ import platform
+ import ssl
+ import sys
+
+ import idna
+ import urllib3
+
+ from . import __version__ as requests_version
+
+ try:
+     import charset_normalizer
+ except ImportError:
+     charset_normalizer = None
+
+ try:
+     import chardet
+ except ImportError:
+     chardet = None
+
+ try:
+     from urllib3.contrib import pyopenssl
+ except ImportError:
+     pyopenssl = None
+     OpenSSL = None
+     cryptography = None
+ else:
+     import cryptography
+     import OpenSSL
+
+
+ def _implementation():
+     """Return a dict with the Python implementation and version.
+
+     Provide both the name and the version of the Python implementation
+     currently running. For example, on CPython 3.10.3 it will return
+     {'name': 'CPython', 'version': '3.10.3'}.
+
+     This function works best on CPython and PyPy: in particular, it probably
+     doesn't work for Jython or IronPython. Future investigation should be done
+     to work out the correct shape of the code for those platforms.
+     """
+     implementation = platform.python_implementation()
+
+     if implementation == "CPython":
+         implementation_version = platform.python_version()
+     elif implementation == "PyPy":
+         implementation_version = "{}.{}.{}".format(
+             sys.pypy_version_info.major,
+             sys.pypy_version_info.minor,
+             sys.pypy_version_info.micro,
+         )
+         if sys.pypy_version_info.releaselevel != "final":
+             implementation_version = "".join(
+                 [implementation_version, sys.pypy_version_info.releaselevel]
+             )
+     elif implementation == "Jython":
+         implementation_version = platform.python_version()  # Complete Guess
+     elif implementation == "IronPython":
+         implementation_version = platform.python_version()  # Complete Guess
+     else:
+         implementation_version = "Unknown"
+
+     return {"name": implementation, "version": implementation_version}
+
+
+ def info():
+     """Generate information for a bug report."""
+     try:
+         platform_info = {
+             "system": platform.system(),
+             "release": platform.release(),
+         }
+     except OSError:
+         platform_info = {
+             "system": "Unknown",
+             "release": "Unknown",
+         }
+
+     implementation_info = _implementation()
+     urllib3_info = {"version": urllib3.__version__}
+     charset_normalizer_info = {"version": None}
+     chardet_info = {"version": None}
+     if charset_normalizer:
+         charset_normalizer_info = {"version": charset_normalizer.__version__}
+     if chardet:
+         chardet_info = {"version": chardet.__version__}
+
+     pyopenssl_info = {
+         "version": None,
+         "openssl_version": "",
+     }
+     if OpenSSL:
+         pyopenssl_info = {
+             "version": OpenSSL.__version__,
+             "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}",
+         }
+     cryptography_info = {
+         "version": getattr(cryptography, "__version__", ""),
+     }
+     idna_info = {
+         "version": getattr(idna, "__version__", ""),
+     }
+
+     system_ssl = ssl.OPENSSL_VERSION_NUMBER
+     system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""}
+
+     return {
+         "platform": platform_info,
+         "implementation": implementation_info,
+         "system_ssl": system_ssl_info,
+         "using_pyopenssl": pyopenssl is not None,
+         "using_charset_normalizer": chardet is None,
+         "pyOpenSSL": pyopenssl_info,
+         "urllib3": urllib3_info,
+         "chardet": chardet_info,
+         "charset_normalizer": charset_normalizer_info,
+         "cryptography": cryptography_info,
+         "idna": idna_info,
+         "requests": {
+             "version": requests_version,
+         },
+     }
+
+
+ def main():
+     """Pretty-print the bug information as JSON."""
+     print(json.dumps(info(), sort_keys=True, indent=2))
+
+
+ if __name__ == "__main__":
+     main()
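For reference, the module above can be run directly as `python -m requests.help` (via the __main__ guard) or queried programmatically, as in this small sketch:

from requests import help as requests_help

# info() returns the same dict main() pretty-prints as JSON
report = requests_help.info()
print(report["requests"]["version"], report["urllib3"]["version"])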
env-llmeval/lib/python3.10/site-packages/requests/hooks.py ADDED
@@ -0,0 +1,33 @@
+ """
+ requests.hooks
+ ~~~~~~~~~~~~~~
+
+ This module provides the capabilities for the Requests hooks system.
+
+ Available hooks:
+
+ ``response``:
+     The response generated from a Request.
+ """
+ HOOKS = ["response"]
+
+
+ def default_hooks():
+     return {event: [] for event in HOOKS}
+
+
+ # TODO: response is the only one
+
+
+ def dispatch_hook(key, hooks, hook_data, **kwargs):
+     """Dispatches a hook dictionary on a given piece of data."""
+     hooks = hooks or {}
+     hooks = hooks.get(key)
+     if hooks:
+         if hasattr(hooks, "__call__"):
+             hooks = [hooks]
+         for hook in hooks:
+             _hook_data = hook(hook_data, **kwargs)
+             if _hook_data is not None:
+                 hook_data = _hook_data
+     return hook_data
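A minimal sketch (not from the commit) of the hooks system above: 'response' is the only hook event, and dispatch_hook() calls each registered callable in order after a response is generated. log_url is a hypothetical callback:

import requests

def log_url(response, *args, **kwargs):
    # a hook receives the Response; returning None keeps it unchanged
    print("fetched:", response.url)

resp = requests.get("https://httpbin.org/get", hooks={"response": [log_url]})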
env-llmeval/lib/python3.10/site-packages/requests/packages.py ADDED
@@ -0,0 +1,28 @@
+ import sys
+
+ try:
+     import chardet
+ except ImportError:
+     import warnings
+
+     import charset_normalizer as chardet
+
+     warnings.filterwarnings("ignore", "Trying to detect", module="charset_normalizer")
+
+ # This code exists for backwards compatibility reasons.
+ # I don't like it either. Just look the other way. :)
+
+ for package in ("urllib3", "idna"):
+     locals()[package] = __import__(package)
+     # This traversal is apparently necessary such that the identities are
+     # preserved (requests.packages.urllib3.* is urllib3.*)
+     for mod in list(sys.modules):
+         if mod == package or mod.startswith(f"{package}."):
+             sys.modules[f"requests.packages.{mod}"] = sys.modules[mod]
+
+ target = chardet.__name__
+ for mod in list(sys.modules):
+     if mod == target or mod.startswith(f"{target}."):
+         target = target.replace(target, "chardet")
+         sys.modules[f"requests.packages.{target}"] = sys.modules[mod]
+ # Kinda cool, though, right?
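A quick illustration of the identity preservation the comment above describes:

import urllib3
import requests.packages

# the sys.modules aliasing maps requests.packages.urllib3 to the very same
# module object as top-level urllib3, not a copy
assert requests.packages.urllib3 is urllib3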
env-llmeval/lib/python3.10/site-packages/requests/sessions.py ADDED
@@ -0,0 +1,833 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ requests.sessions
3
+ ~~~~~~~~~~~~~~~~~
4
+
5
+ This module provides a Session object to manage and persist settings across
6
+ requests (cookies, auth, proxies).
7
+ """
8
+ import os
9
+ import sys
10
+ import time
11
+ from collections import OrderedDict
12
+ from datetime import timedelta
13
+
14
+ from ._internal_utils import to_native_string
15
+ from .adapters import HTTPAdapter
16
+ from .auth import _basic_auth_str
17
+ from .compat import Mapping, cookielib, urljoin, urlparse
18
+ from .cookies import (
19
+ RequestsCookieJar,
20
+ cookiejar_from_dict,
21
+ extract_cookies_to_jar,
22
+ merge_cookies,
23
+ )
24
+ from .exceptions import (
25
+ ChunkedEncodingError,
26
+ ContentDecodingError,
27
+ InvalidSchema,
28
+ TooManyRedirects,
29
+ )
30
+ from .hooks import default_hooks, dispatch_hook
31
+
32
+ # formerly defined here, reexposed here for backward compatibility
33
+ from .models import ( # noqa: F401
34
+ DEFAULT_REDIRECT_LIMIT,
35
+ REDIRECT_STATI,
36
+ PreparedRequest,
37
+ Request,
38
+ )
39
+ from .status_codes import codes
40
+ from .structures import CaseInsensitiveDict
41
+ from .utils import ( # noqa: F401
42
+ DEFAULT_PORTS,
43
+ default_headers,
44
+ get_auth_from_url,
45
+ get_environ_proxies,
46
+ get_netrc_auth,
47
+ requote_uri,
48
+ resolve_proxies,
49
+ rewind_body,
50
+ should_bypass_proxies,
51
+ to_key_val_list,
52
+ )
53
+
54
+ # Preferred clock, based on which one is more accurate on a given system.
55
+ if sys.platform == "win32":
56
+ preferred_clock = time.perf_counter
57
+ else:
58
+ preferred_clock = time.time
59
+
60
+
61
+ def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
62
+ """Determines appropriate setting for a given request, taking into account
63
+ the explicit setting on that request, and the setting in the session. If a
64
+ setting is a dictionary, they will be merged together using `dict_class`
65
+ """
66
+
67
+ if session_setting is None:
68
+ return request_setting
69
+
70
+ if request_setting is None:
71
+ return session_setting
72
+
73
+ # Bypass if not a dictionary (e.g. verify)
74
+ if not (
75
+ isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
76
+ ):
77
+ return request_setting
78
+
79
+ merged_setting = dict_class(to_key_val_list(session_setting))
80
+ merged_setting.update(to_key_val_list(request_setting))
81
+
82
+ # Remove keys that are set to None. Extract keys first to avoid altering
83
+ # the dictionary during iteration.
84
+ none_keys = [k for (k, v) in merged_setting.items() if v is None]
85
+ for key in none_keys:
86
+ del merged_setting[key]
87
+
88
+ return merged_setting
89
+
90
+
91
+ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
92
+ """Properly merges both requests and session hooks.
93
+
94
+ This is necessary because when request_hooks == {'response': []}, the
95
+ merge breaks Session hooks entirely.
96
+ """
97
+ if session_hooks is None or session_hooks.get("response") == []:
98
+ return request_hooks
99
+
100
+ if request_hooks is None or request_hooks.get("response") == []:
101
+ return session_hooks
102
+
103
+ return merge_setting(request_hooks, session_hooks, dict_class)
104
+
105
+
106
+ class SessionRedirectMixin:
107
+ def get_redirect_target(self, resp):
108
+ """Receives a Response. Returns a redirect URI or ``None``"""
109
+ # Due to the nature of how requests processes redirects this method will
110
+ # be called at least once upon the original response and at least twice
111
+ # on each subsequent redirect response (if any).
112
+ # If a custom mixin is used to handle this logic, it may be advantageous
113
+ # to cache the redirect location onto the response object as a private
114
+ # attribute.
115
+ if resp.is_redirect:
116
+ location = resp.headers["location"]
117
+ # Currently the underlying http module on py3 decode headers
118
+ # in latin1, but empirical evidence suggests that latin1 is very
119
+ # rarely used with non-ASCII characters in HTTP headers.
120
+ # It is more likely to get UTF8 header rather than latin1.
121
+ # This causes incorrect handling of UTF8 encoded location headers.
122
+ # To solve this, we re-encode the location in latin1.
123
+ location = location.encode("latin1")
124
+ return to_native_string(location, "utf8")
125
+ return None
126
+
127
+ def should_strip_auth(self, old_url, new_url):
128
+ """Decide whether Authorization header should be removed when redirecting"""
129
+ old_parsed = urlparse(old_url)
130
+ new_parsed = urlparse(new_url)
131
+ if old_parsed.hostname != new_parsed.hostname:
132
+ return True
133
+ # Special case: allow http -> https redirect when using the standard
134
+ # ports. This isn't specified by RFC 7235, but is kept to avoid
135
+ # breaking backwards compatibility with older versions of requests
136
+ # that allowed any redirects on the same host.
137
+ if (
138
+ old_parsed.scheme == "http"
139
+ and old_parsed.port in (80, None)
140
+ and new_parsed.scheme == "https"
141
+ and new_parsed.port in (443, None)
142
+ ):
143
+ return False
144
+
145
+ # Handle default port usage corresponding to scheme.
146
+ changed_port = old_parsed.port != new_parsed.port
147
+ changed_scheme = old_parsed.scheme != new_parsed.scheme
148
+ default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
149
+ if (
150
+ not changed_scheme
151
+ and old_parsed.port in default_port
152
+ and new_parsed.port in default_port
153
+ ):
154
+ return False
155
+
156
+ # Standard case: root URI must match
157
+ return changed_port or changed_scheme
158
+
159
+ def resolve_redirects(
160
+ self,
161
+ resp,
162
+ req,
163
+ stream=False,
164
+ timeout=None,
165
+ verify=True,
166
+ cert=None,
167
+ proxies=None,
168
+ yield_requests=False,
169
+ **adapter_kwargs,
170
+ ):
171
+ """Receives a Response. Returns a generator of Responses or Requests."""
172
+
173
+ hist = [] # keep track of history
174
+
175
+ url = self.get_redirect_target(resp)
176
+ previous_fragment = urlparse(req.url).fragment
177
+ while url:
178
+ prepared_request = req.copy()
179
+
180
+ # Update history and keep track of redirects.
181
+ # resp.history must ignore the original request in this loop
182
+ hist.append(resp)
183
+ resp.history = hist[1:]
184
+
185
+ try:
186
+ resp.content # Consume socket so it can be released
187
+ except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
188
+ resp.raw.read(decode_content=False)
189
+
190
+ if len(resp.history) >= self.max_redirects:
191
+ raise TooManyRedirects(
192
+ f"Exceeded {self.max_redirects} redirects.", response=resp
193
+ )
194
+
195
+ # Release the connection back into the pool.
196
+ resp.close()
197
+
198
+ # Handle redirection without scheme (see: RFC 1808 Section 4)
199
+ if url.startswith("//"):
200
+ parsed_rurl = urlparse(resp.url)
201
+ url = ":".join([to_native_string(parsed_rurl.scheme), url])
202
+
203
+ # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
204
+ parsed = urlparse(url)
205
+ if parsed.fragment == "" and previous_fragment:
206
+ parsed = parsed._replace(fragment=previous_fragment)
207
+ elif parsed.fragment:
208
+ previous_fragment = parsed.fragment
209
+ url = parsed.geturl()
210
+
211
+ # Facilitate relative 'location' headers, as allowed by RFC 7231.
212
+ # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
213
+ # Compliant with RFC3986, we percent encode the url.
214
+ if not parsed.netloc:
215
+ url = urljoin(resp.url, requote_uri(url))
216
+ else:
217
+ url = requote_uri(url)
218
+
219
+ prepared_request.url = to_native_string(url)
220
+
221
+ self.rebuild_method(prepared_request, resp)
222
+
223
+ # https://github.com/psf/requests/issues/1084
224
+ if resp.status_code not in (
225
+ codes.temporary_redirect,
226
+ codes.permanent_redirect,
227
+ ):
228
+ # https://github.com/psf/requests/issues/3490
229
+ purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
230
+ for header in purged_headers:
231
+ prepared_request.headers.pop(header, None)
232
+ prepared_request.body = None
233
+
234
+ headers = prepared_request.headers
235
+ headers.pop("Cookie", None)
236
+
237
+ # Extract any cookies sent on the response to the cookiejar
238
+ # in the new request. Because we've mutated our copied prepared
239
+ # request, use the old one that we haven't yet touched.
240
+ extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
241
+ merge_cookies(prepared_request._cookies, self.cookies)
242
+ prepared_request.prepare_cookies(prepared_request._cookies)
243
+
244
+ # Rebuild auth and proxy information.
245
+ proxies = self.rebuild_proxies(prepared_request, proxies)
246
+ self.rebuild_auth(prepared_request, resp)
247
+
248
+ # A failed tell() sets `_body_position` to `object()`. This non-None
249
+ # value ensures `rewindable` will be True, allowing us to raise an
250
+ # UnrewindableBodyError, instead of hanging the connection.
251
+ rewindable = prepared_request._body_position is not None and (
252
+ "Content-Length" in headers or "Transfer-Encoding" in headers
253
+ )
254
+
255
+ # Attempt to rewind consumed file-like object.
256
+ if rewindable:
257
+ rewind_body(prepared_request)
258
+
259
+ # Override the original request.
260
+ req = prepared_request
261
+
262
+ if yield_requests:
263
+ yield req
264
+ else:
265
+
266
+ resp = self.send(
267
+ req,
268
+ stream=stream,
269
+ timeout=timeout,
270
+ verify=verify,
271
+ cert=cert,
272
+ proxies=proxies,
273
+ allow_redirects=False,
274
+ **adapter_kwargs,
275
+ )
276
+
277
+ extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
278
+
279
+ # extract redirect url, if any, for the next loop
280
+ url = self.get_redirect_target(resp)
281
+ yield resp
282
+
283
+ def rebuild_auth(self, prepared_request, response):
284
+ """When being redirected we may want to strip authentication from the
285
+ request to avoid leaking credentials. This method intelligently removes
286
+ and reapplies authentication where possible to avoid credential loss.
287
+ """
288
+ headers = prepared_request.headers
289
+ url = prepared_request.url
290
+
291
+ if "Authorization" in headers and self.should_strip_auth(
292
+ response.request.url, url
293
+ ):
294
+ # If we get redirected to a new host, we should strip out any
295
+ # authentication headers.
296
+ del headers["Authorization"]
297
+
298
+ # .netrc might have more auth for us on our new host.
299
+ new_auth = get_netrc_auth(url) if self.trust_env else None
300
+ if new_auth is not None:
301
+ prepared_request.prepare_auth(new_auth)
302
+
303
+ def rebuild_proxies(self, prepared_request, proxies):
304
+ """This method re-evaluates the proxy configuration by considering the
305
+ environment variables. If we are redirected to a URL covered by
306
+ NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
307
+ proxy keys for this URL (in case they were stripped by a previous
308
+ redirect).
309
+
310
+ This method also replaces the Proxy-Authorization header where
311
+ necessary.
312
+
313
+ :rtype: dict
314
+ """
315
+ headers = prepared_request.headers
316
+ scheme = urlparse(prepared_request.url).scheme
317
+ new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
318
+
319
+ if "Proxy-Authorization" in headers:
320
+ del headers["Proxy-Authorization"]
321
+
322
+ try:
323
+ username, password = get_auth_from_url(new_proxies[scheme])
324
+ except KeyError:
325
+ username, password = None, None
326
+
327
+ # urllib3 handles proxy authorization for us in the standard adapter.
328
+ # Avoid appending this to TLS tunneled requests where it may be leaked.
329
+ if not scheme.startswith('https') and username and password:
330
+ headers["Proxy-Authorization"] = _basic_auth_str(username, password)
331
+
332
+ return new_proxies
333
+
334
+ def rebuild_method(self, prepared_request, response):
335
+ """When being redirected we may want to change the method of the request
336
+ based on certain specs or browser behavior.
337
+ """
338
+ method = prepared_request.method
339
+
340
+ # https://tools.ietf.org/html/rfc7231#section-6.4.4
341
+ if response.status_code == codes.see_other and method != "HEAD":
342
+ method = "GET"
343
+
344
+ # Do what the browsers do, despite standards...
345
+ # First, turn 302s into GETs.
346
+ if response.status_code == codes.found and method != "HEAD":
347
+ method = "GET"
348
+
349
+ # Second, if a POST is responded to with a 301, turn it into a GET.
350
+ # This bizarre behaviour is explained in Issue 1704.
351
+ if response.status_code == codes.moved and method == "POST":
352
+ method = "GET"
353
+
354
+ prepared_request.method = method
+
+
+class Session(SessionRedirectMixin):
+    """A Requests session.
+
+    Provides cookie persistence, connection-pooling, and configuration.
+
+    Basic Usage::
+
+      >>> import requests
+      >>> s = requests.Session()
+      >>> s.get('https://httpbin.org/get')
+      <Response [200]>
+
+    Or as a context manager::
+
+      >>> with requests.Session() as s:
+      ...     s.get('https://httpbin.org/get')
+      <Response [200]>
+    """
+
+    __attrs__ = [
+        "headers",
+        "cookies",
+        "auth",
+        "proxies",
+        "hooks",
+        "params",
+        "verify",
+        "cert",
+        "adapters",
+        "stream",
+        "trust_env",
+        "max_redirects",
+    ]
+
+    def __init__(self):
+        #: A case-insensitive dictionary of headers to be sent on each
+        #: :class:`Request <Request>` sent from this
+        #: :class:`Session <Session>`.
+        self.headers = default_headers()
+
+        #: Default Authentication tuple or object to attach to
+        #: :class:`Request <Request>`.
+        self.auth = None
+
+        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+        #: be used on each :class:`Request <Request>`.
+        self.proxies = {}
+
+        #: Event-handling hooks.
+        self.hooks = default_hooks()
+
+        #: Dictionary of querystring data to attach to each
+        #: :class:`Request <Request>`. The dictionary values may be lists for
+        #: representing multivalued query parameters.
+        self.params = {}
+
+        #: Stream response content default.
+        self.stream = False
+
+        #: SSL Verification default.
+        #: Defaults to `True`, requiring requests to verify the TLS certificate at the
+        #: remote end.
+        #: If verify is set to `False`, requests will accept any TLS certificate
+        #: presented by the server, and will ignore hostname mismatches and/or
+        #: expired certificates, which will make your application vulnerable to
+        #: man-in-the-middle (MitM) attacks.
+        #: Only set this to `False` for testing.
+        self.verify = True
+
+        #: SSL client certificate default, if String, path to ssl client
+        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
+        self.cert = None
+
+        #: Maximum number of redirects allowed. If the request exceeds this
+        #: limit, a :class:`TooManyRedirects` exception is raised.
+        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
+        #: 30.
+        self.max_redirects = DEFAULT_REDIRECT_LIMIT
+
+        #: Trust environment settings for proxy configuration, default
+        #: authentication and similar.
+        self.trust_env = True
+
+        #: A CookieJar containing all currently outstanding cookies set on this
+        #: session. By default it is a
+        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+        #: may be any other ``cookielib.CookieJar`` compatible object.
+        self.cookies = cookiejar_from_dict({})
+
+        # Default connection adapters.
+        self.adapters = OrderedDict()
+        self.mount("https://", HTTPAdapter())
+        self.mount("http://", HTTPAdapter())
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def prepare_request(self, request):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+        transmission and returns it. The :class:`PreparedRequest` has settings
+        merged from the :class:`Request <Request>` instance and those of the
+        :class:`Session`.
+
+        :param request: :class:`Request` instance to prepare with this
+            session's settings.
+        :rtype: requests.PreparedRequest
+        """
+        cookies = request.cookies or {}
+
+        # Bootstrap CookieJar.
+        if not isinstance(cookies, cookielib.CookieJar):
+            cookies = cookiejar_from_dict(cookies)
+
+        # Merge with session cookies
+        merged_cookies = merge_cookies(
+            merge_cookies(RequestsCookieJar(), self.cookies), cookies
+        )
+
+        # Set environment's basic authentication if not explicitly set.
+        auth = request.auth
+        if self.trust_env and not auth and not self.auth:
+            auth = get_netrc_auth(request.url)
+
+        p = PreparedRequest()
+        p.prepare(
+            method=request.method.upper(),
+            url=request.url,
+            files=request.files,
+            data=request.data,
+            json=request.json,
+            headers=merge_setting(
+                request.headers, self.headers, dict_class=CaseInsensitiveDict
+            ),
+            params=merge_setting(request.params, self.params),
+            auth=merge_setting(auth, self.auth),
+            cookies=merged_cookies,
+            hooks=merge_hooks(request.hooks, self.hooks),
+        )
+        return p
+
+    def request(
+        self,
+        method,
+        url,
+        params=None,
+        data=None,
+        headers=None,
+        cookies=None,
+        files=None,
+        auth=None,
+        timeout=None,
+        allow_redirects=True,
+        proxies=None,
+        hooks=None,
+        stream=None,
+        verify=None,
+        cert=None,
+        json=None,
+    ):
+        """Constructs a :class:`Request <Request>`, prepares it and sends it.
+        Returns :class:`Response <Response>` object.
+
+        :param method: method for the new :class:`Request` object.
+        :param url: URL for the new :class:`Request` object.
+        :param params: (optional) Dictionary or bytes to be sent in the query
+            string for the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
+        :param headers: (optional) Dictionary of HTTP Headers to send with the
+            :class:`Request`.
+        :param cookies: (optional) Dict or CookieJar object to send with the
+            :class:`Request`.
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
+            for multipart encoding upload.
+        :param auth: (optional) Auth tuple or callable to enable
+            Basic/Digest/Custom HTTP Auth.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Whether to follow redirects.
+            Defaults to ``True``.
+        :type allow_redirects: bool
+        :param proxies: (optional) Dictionary mapping protocol or protocol and
+            hostname to the URL of the proxy.
+        :param stream: (optional) whether to immediately download the response
+            content. Defaults to ``False``.
+        :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``. When set to
+            ``False``, requests will accept any TLS certificate presented by
+            the server, and will ignore hostname mismatches and/or expired
+            certificates, which will make your application vulnerable to
+            man-in-the-middle (MitM) attacks. Setting verify to ``False``
+            may be useful during local development or testing.
+        :param cert: (optional) if String, path to ssl client cert file (.pem).
+            If Tuple, ('cert', 'key') pair.
+        :rtype: requests.Response
+        """
+        # Create the Request.
+        req = Request(
+            method=method.upper(),
+            url=url,
+            headers=headers,
+            files=files,
+            data=data or {},
+            json=json,
+            params=params or {},
+            auth=auth,
+            cookies=cookies,
+            hooks=hooks,
+        )
+        prep = self.prepare_request(req)
+
+        proxies = proxies or {}
+
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
+
+        # Send the request.
+        send_kwargs = {
+            "timeout": timeout,
+            "allow_redirects": allow_redirects,
+        }
+        send_kwargs.update(settings)
+        resp = self.send(prep, **send_kwargs)
+
+        return resp
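
Since prepare_request merges session-level state with per-request arguments via merge_setting, the merge can be observed without any network traffic; a small sketch (example.com is a placeholder):

import requests

s = requests.Session()
s.headers.update({"X-Team": "infra"})
s.params = {"env": "prod"}

# Per-request values are layered on top of the session defaults.
prep = s.prepare_request(
    requests.Request("GET", "https://example.com/api", params={"page": "2"})
)
print(prep.url)                # https://example.com/api?env=prod&page=2
print(prep.headers["X-Team"])  # infra
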
+
+    def get(self, url, **kwargs):
+        r"""Sends a GET request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        kwargs.setdefault("allow_redirects", True)
+        return self.request("GET", url, **kwargs)
+
+    def options(self, url, **kwargs):
+        r"""Sends an OPTIONS request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        kwargs.setdefault("allow_redirects", True)
+        return self.request("OPTIONS", url, **kwargs)
+
+    def head(self, url, **kwargs):
+        r"""Sends a HEAD request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        kwargs.setdefault("allow_redirects", False)
+        return self.request("HEAD", url, **kwargs)
+
+    def post(self, url, data=None, json=None, **kwargs):
+        r"""Sends a POST request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        return self.request("POST", url, data=data, json=json, **kwargs)
+
+    def put(self, url, data=None, **kwargs):
+        r"""Sends a PUT request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        return self.request("PUT", url, data=data, **kwargs)
+
+    def patch(self, url, data=None, **kwargs):
+        r"""Sends a PATCH request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        return self.request("PATCH", url, data=data, **kwargs)
+
+    def delete(self, url, **kwargs):
+        r"""Sends a DELETE request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+        return self.request("DELETE", url, **kwargs)
+
+    def send(self, request, **kwargs):
+        """Send a given PreparedRequest.
+
+        :rtype: requests.Response
+        """
+        # Set defaults that the hooks can utilize to ensure they always have
+        # the correct parameters to reproduce the previous request.
+        kwargs.setdefault("stream", self.stream)
+        kwargs.setdefault("verify", self.verify)
+        kwargs.setdefault("cert", self.cert)
+        if "proxies" not in kwargs:
+            kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
+
+        # It's possible that users might accidentally send a Request object.
+        # Guard against that specific failure case.
+        if isinstance(request, Request):
+            raise ValueError("You can only send PreparedRequests.")
+
+        # Set up variables needed for resolve_redirects and dispatching of hooks
+        allow_redirects = kwargs.pop("allow_redirects", True)
+        stream = kwargs.get("stream")
+        hooks = request.hooks
+
+        # Get the appropriate adapter to use
+        adapter = self.get_adapter(url=request.url)
+
+        # Start time (approximately) of the request
+        start = preferred_clock()
+
+        # Send the request
+        r = adapter.send(request, **kwargs)
+
+        # Total elapsed time of the request (approximately)
+        elapsed = preferred_clock() - start
+        r.elapsed = timedelta(seconds=elapsed)
+
+        # Response manipulation hooks
+        r = dispatch_hook("response", hooks, r, **kwargs)
+
+        # Persist cookies
+        if r.history:
+            # If the hooks create history then we want those cookies too
+            for resp in r.history:
+                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
+
+        extract_cookies_to_jar(self.cookies, request, r.raw)
+
+        # Resolve redirects if allowed.
+        if allow_redirects:
+            # Redirect resolving generator.
+            gen = self.resolve_redirects(r, request, **kwargs)
+            history = [resp for resp in gen]
+        else:
+            history = []
+
+        # Shuffle things around if there's history.
+        if history:
+            # Insert the first (original) request at the start
+            history.insert(0, r)
+            # Get the last request made
+            r = history.pop()
+            r.history = history
+
+        # If redirects aren't being followed, store the next request on the
+        # Response for Response.next().
+        if not allow_redirects:
+            try:
+                r._next = next(
+                    self.resolve_redirects(r, request, yield_requests=True, **kwargs)
+                )
+            except StopIteration:
+                pass
+
+        if not stream:
+            r.content
+
+        return r
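
The prepare/send split lets callers inspect or amend the exact request before transmission. A sketch (the X-Signature header is hypothetical; assumes network access to httpbin.org):

import requests

s = requests.Session()
req = requests.Request("POST", "https://httpbin.org/post", json={"k": "v"})

prep = s.prepare_request(req)  # session state merged in
prep.headers["X-Signature"] = f"len-{len(prep.body)}"  # sign the final bytes

resp = s.send(prep, timeout=5)
print(resp.status_code)
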
750
+
751
+ def merge_environment_settings(self, url, proxies, stream, verify, cert):
752
+ """
753
+ Check the environment and merge it with some settings.
754
+
755
+ :rtype: dict
756
+ """
757
+ # Gather clues from the surrounding environment.
758
+ if self.trust_env:
759
+ # Set environment's proxies.
760
+ no_proxy = proxies.get("no_proxy") if proxies is not None else None
761
+ env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
762
+ for (k, v) in env_proxies.items():
763
+ proxies.setdefault(k, v)
764
+
765
+ # Look for requests environment configuration
766
+ # and be compatible with cURL.
767
+ if verify is True or verify is None:
768
+ verify = (
769
+ os.environ.get("REQUESTS_CA_BUNDLE")
770
+ or os.environ.get("CURL_CA_BUNDLE")
771
+ or verify
772
+ )
773
+
774
+ # Merge all the kwargs.
775
+ proxies = merge_setting(proxies, self.proxies)
776
+ stream = merge_setting(stream, self.stream)
777
+ verify = merge_setting(verify, self.verify)
778
+ cert = merge_setting(cert, self.cert)
779
+
780
+ return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
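
The precedence implemented above is: explicit per-request value, then (with trust_env) the REQUESTS_CA_BUNDLE/CURL_CA_BUNDLE environment, then the session default. A sketch with a hypothetical bundle path:

import os
import requests

s = requests.Session()
os.environ["REQUESTS_CA_BUNDLE"] = "/etc/ssl/certs/corp-ca.pem"  # hypothetical path

settings = s.merge_environment_settings("https://example.com", {}, None, None, None)
print(settings["verify"])  # /etc/ssl/certs/corp-ca.pem (env wins when verify is unset)

settings = s.merge_environment_settings("https://example.com", {}, None, "/tmp/other.pem", None)
print(settings["verify"])  # /tmp/other.pem (an explicit value always wins)
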
+
+    def get_adapter(self, url):
+        """
+        Returns the appropriate connection adapter for the given URL.
+
+        :rtype: requests.adapters.BaseAdapter
+        """
+        for (prefix, adapter) in self.adapters.items():
+            if url.lower().startswith(prefix.lower()):
+                return adapter
+
+        # Nothing matches :-/
+        raise InvalidSchema(f"No connection adapters were found for {url!r}")
+
+    def close(self):
+        """Closes all adapters and as such the session"""
+        for v in self.adapters.values():
+            v.close()
+
+    def mount(self, prefix, adapter):
+        """Registers a connection adapter to a prefix.
+
+        Adapters are sorted in descending order by prefix length.
+        """
+        self.adapters[prefix] = adapter
+        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
+
+        for key in keys_to_move:
+            self.adapters[key] = self.adapters.pop(key)
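
Because get_adapter returns the first (longest) matching prefix, mounting a transport adapter on a narrow prefix overrides the defaults for just that host. A sketch using the real HTTPAdapter(max_retries=...) and urllib3 Retry APIs; the host name is made up:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

s = requests.Session()
retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503, 504])
s.mount("https://", HTTPAdapter(max_retries=retry))
s.mount("https://flaky.internal.example/", HTTPAdapter(max_retries=Retry(total=10)))

# Longest prefix wins: this resolves to the 10-retry adapter.
print(s.get_adapter("https://flaky.internal.example/health"))
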
811
+
812
+ def __getstate__(self):
813
+ state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
814
+ return state
815
+
816
+ def __setstate__(self, state):
817
+ for attr, value in state.items():
818
+ setattr(self, attr, value)
819
+
820
+
821
+ def session():
822
+ """
823
+ Returns a :class:`Session` for context-management.
824
+
825
+ .. deprecated:: 1.0.0
826
+
827
+ This method has been deprecated since version 1.0.0 and is only kept for
828
+ backwards compatibility. New code should use :class:`~requests.sessions.Session`
829
+ to create a session. This may be removed at a future date.
830
+
831
+ :rtype: Session
832
+ """
833
+ return Session()
env-llmeval/lib/python3.10/site-packages/requests/status_codes.py ADDED
@@ -0,0 +1,128 @@
+r"""
+The ``codes`` object defines a mapping from common names for HTTP statuses
+to their numerical codes, accessible either as attributes or as dictionary
+items.
+
+Example::
+
+    >>> import requests
+    >>> requests.codes['temporary_redirect']
+    307
+    >>> requests.codes.teapot
+    418
+    >>> requests.codes['\o/']
+    200
+
+Some codes have multiple names, and both upper- and lower-case versions of
+the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
+``codes.okay`` all correspond to the HTTP status code 200.
+"""
+
+from .structures import LookupDict
+
+_codes = {
+    # Informational.
+    100: ("continue",),
+    101: ("switching_protocols",),
+    102: ("processing",),
+    103: ("checkpoint",),
+    122: ("uri_too_long", "request_uri_too_long"),
+    200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
+    201: ("created",),
+    202: ("accepted",),
+    203: ("non_authoritative_info", "non_authoritative_information"),
+    204: ("no_content",),
+    205: ("reset_content", "reset"),
+    206: ("partial_content", "partial"),
+    207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
+    208: ("already_reported",),
+    226: ("im_used",),
+    # Redirection.
+    300: ("multiple_choices",),
+    301: ("moved_permanently", "moved", "\\o-"),
+    302: ("found",),
+    303: ("see_other", "other"),
+    304: ("not_modified",),
+    305: ("use_proxy",),
+    306: ("switch_proxy",),
+    307: ("temporary_redirect", "temporary_moved", "temporary"),
+    308: (
+        "permanent_redirect",
+        "resume_incomplete",
+        "resume",
+    ),  # "resume" and "resume_incomplete" to be removed in 3.0
+    # Client Error.
+    400: ("bad_request", "bad"),
+    401: ("unauthorized",),
+    402: ("payment_required", "payment"),
+    403: ("forbidden",),
+    404: ("not_found", "-o-"),
+    405: ("method_not_allowed", "not_allowed"),
+    406: ("not_acceptable",),
+    407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
+    408: ("request_timeout", "timeout"),
+    409: ("conflict",),
+    410: ("gone",),
+    411: ("length_required",),
+    412: ("precondition_failed", "precondition"),
+    413: ("request_entity_too_large",),
+    414: ("request_uri_too_large",),
+    415: ("unsupported_media_type", "unsupported_media", "media_type"),
+    416: (
+        "requested_range_not_satisfiable",
+        "requested_range",
+        "range_not_satisfiable",
+    ),
+    417: ("expectation_failed",),
+    418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
+    421: ("misdirected_request",),
+    422: ("unprocessable_entity", "unprocessable"),
+    423: ("locked",),
+    424: ("failed_dependency", "dependency"),
+    425: ("unordered_collection", "unordered"),
+    426: ("upgrade_required", "upgrade"),
+    428: ("precondition_required", "precondition"),
+    429: ("too_many_requests", "too_many"),
+    431: ("header_fields_too_large", "fields_too_large"),
+    444: ("no_response", "none"),
+    449: ("retry_with", "retry"),
+    450: ("blocked_by_windows_parental_controls", "parental_controls"),
+    451: ("unavailable_for_legal_reasons", "legal_reasons"),
+    499: ("client_closed_request",),
+    # Server Error.
+    500: ("internal_server_error", "server_error", "/o\\", "✗"),
+    501: ("not_implemented",),
+    502: ("bad_gateway",),
+    503: ("service_unavailable", "unavailable"),
+    504: ("gateway_timeout",),
+    505: ("http_version_not_supported", "http_version"),
+    506: ("variant_also_negotiates",),
+    507: ("insufficient_storage",),
+    509: ("bandwidth_limit_exceeded", "bandwidth"),
+    510: ("not_extended",),
+    511: ("network_authentication_required", "network_auth", "network_authentication"),
+}
+
+codes = LookupDict(name="status_codes")
+
+
+def _init():
+    for code, titles in _codes.items():
+        for title in titles:
+            setattr(codes, title, code)
+            if not title.startswith(("\\", "/")):
+                setattr(codes, title.upper(), code)
+
+    def doc(code):
+        names = ", ".join(f"``{n}``" for n in _codes[code])
+        return "* %d: %s" % (code, names)
+
+    global __doc__
+    __doc__ = (
+        __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
+        if __doc__ is not None
+        else None
+    )
+
+
+_init()
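
After _init() runs, every alias is available as an attribute, an upper-cased attribute, and a dict key on codes, so status checks read naturally; a small sketch:

import requests

assert requests.codes.ok == requests.codes.OK == requests.codes["okay"] == 200
assert requests.codes.not_found == 404
assert requests.codes.teapot == 418
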
env-llmeval/lib/python3.10/site-packages/requests/utils.py ADDED
@@ -0,0 +1,1094 @@
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+from collections import OrderedDict
+
+from urllib3.util import make_headers, parse_url
+
+from . import certs
+from .__version__ import __version__
+
+# to_native_string is unused here, but imported for backwards compatibility
+from ._internal_utils import (  # noqa: F401
+    _HEADER_VALIDATORS_BYTE,
+    _HEADER_VALIDATORS_STR,
+    HEADER_VALIDATORS,
+    to_native_string,
+)
+from .compat import (
+    Mapping,
+    basestring,
+    bytes,
+    getproxies,
+    getproxies_environment,
+    integer_types,
+)
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+    proxy_bypass,
+    proxy_bypass_environment,
+    quote,
+    str,
+    unquote,
+    urlparse,
+    urlunparse,
+)
+from .cookies import cookiejar_from_dict
+from .exceptions import (
+    FileModeWarning,
+    InvalidHeader,
+    InvalidURL,
+    UnrewindableBodyError,
+)
+from .structures import CaseInsensitiveDict
+
+NETRC_FILES = (".netrc", "_netrc")
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {"http": 80, "https": 443}
+
+# Ensure that ', ' is used to preserve previous delimiter behavior.
+DEFAULT_ACCEPT_ENCODING = ", ".join(
+    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
+)
+
+
+if sys.platform == "win32":
+    # provide a proxy_bypass version on Windows without DNS lookups
+
+    def proxy_bypass_registry(host):
+        try:
+            import winreg
+        except ImportError:
+            return False
+
+        try:
+            internetSettings = winreg.OpenKey(
+                winreg.HKEY_CURRENT_USER,
+                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
+            )
+            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
+            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
+            # ProxyOverride is almost always a string
+            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
+        except (OSError, ValueError):
+            return False
+        if not proxyEnable or not proxyOverride:
+            return False
+
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(";")
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            if test == "<local>":
+                if "." not in host:
+                    return True
+            test = test.replace(".", r"\.")  # mask dots
+            test = test.replace("*", r".*")  # change glob sequence
+            test = test.replace("?", r".")  # change glob char
+            if re.match(test, host, re.I):
+                return True
+        return False
+
+    def proxy_bypass(host):  # noqa
+        """Return True, if the host should be bypassed.
+
+        Checks proxy settings gathered from the environment, if specified,
+        or the registry.
+        """
+        if getproxies_environment():
+            return proxy_bypass_environment(host)
+        else:
+            return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+    """Returns an internal sequence dictionary update."""
+
+    if hasattr(d, "items"):
+        d = d.items()
+
+    return d
+
+
+def super_len(o):
+    total_length = None
+    current_position = 0
+
+    if hasattr(o, "__len__"):
+        total_length = len(o)
+
+    elif hasattr(o, "len"):
+        total_length = o.len
+
+    elif hasattr(o, "fileno"):
+        try:
+            fileno = o.fileno()
+        except (io.UnsupportedOperation, AttributeError):
+            # AttributeError is a surprising exception, seeing as how we've just checked
+            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
+            # `Tarfile.extractfile()`, per issue 5229.
+            pass
+        else:
+            total_length = os.fstat(fileno).st_size
+
+            # Having used fstat to determine the file length, we need to
+            # confirm that this file was opened up in binary mode.
+            if "b" not in o.mode:
+                warnings.warn(
+                    (
+                        "Requests has determined the content-length for this "
+                        "request using the binary size of the file: however, the "
+                        "file has been opened in text mode (i.e. without the 'b' "
+                        "flag in the mode). This may lead to an incorrect "
+                        "content-length. In Requests 3.0, support will be removed "
+                        "for files in text mode."
+                    ),
+                    FileModeWarning,
+                )
+
+    if hasattr(o, "tell"):
+        try:
+            current_position = o.tell()
+        except OSError:
+            # This can happen in some weird situations, such as when the file
+            # is actually a special file descriptor like stdin. In this
+            # instance, we don't know what the length is, so set it to zero and
+            # let requests chunk it instead.
+            if total_length is not None:
+                current_position = total_length
+        else:
+            if hasattr(o, "seek") and total_length is None:
+                # StringIO and BytesIO have seek but no usable fileno
+                try:
+                    # seek to end of file
+                    o.seek(0, 2)
+                    total_length = o.tell()
+
+                    # seek back to current position to support
+                    # partially read file-like objects
+                    o.seek(current_position or 0)
+                except OSError:
+                    total_length = 0
+
+    if total_length is None:
+        total_length = 0
+
+    return max(0, total_length - current_position)
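
super_len reports the bytes remaining from the current read position, which is what a Content-Length header should say for a partially consumed body; a sketch:

import io
from requests.utils import super_len

buf = io.BytesIO(b"hello world")
buf.read(6)                 # consume "hello "
print(super_len(buf))       # 5 -- only the unread tail counts
print(super_len(b"hello"))  # 5 -- plain sized objects fall back to len()
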
+
+
+def get_netrc_auth(url, raise_errors=False):
+    """Returns the Requests tuple auth for a given url from netrc."""
+
+    netrc_file = os.environ.get("NETRC")
+    if netrc_file is not None:
+        netrc_locations = (netrc_file,)
+    else:
+        netrc_locations = (f"~/{f}" for f in NETRC_FILES)
+
+    try:
+        from netrc import NetrcParseError, netrc
+
+        netrc_path = None
+
+        for f in netrc_locations:
+            try:
+                loc = os.path.expanduser(f)
+            except KeyError:
+                # os.path.expanduser can fail when $HOME is undefined and
+                # getpwuid fails. See https://bugs.python.org/issue20164 &
+                # https://github.com/psf/requests/issues/1846
+                return
+
+            if os.path.exists(loc):
+                netrc_path = loc
+                break
+
+        # Abort early if there isn't one.
+        if netrc_path is None:
+            return
+
+        ri = urlparse(url)
+
+        # Strip port numbers from netloc. This weird ``if...encode`` dance is
+        # used for Python 3.2, which doesn't support unicode literals.
+        splitstr = b":"
+        if isinstance(url, str):
+            splitstr = splitstr.decode("ascii")
+        host = ri.netloc.split(splitstr)[0]
+
+        try:
+            _netrc = netrc(netrc_path).authenticators(host)
+            if _netrc:
+                # Return with login / password
+                login_i = 0 if _netrc[0] else 1
+                return (_netrc[login_i], _netrc[2])
+        except (NetrcParseError, OSError):
+            # If there was a parsing error or a permissions issue reading the file,
+            # we'll just skip netrc auth unless explicitly asked to raise errors.
+            if raise_errors:
+                raise
+
+    # App Engine hackiness.
+    except (ImportError, AttributeError):
+        pass
+
+
+def guess_filename(obj):
+    """Tries to guess the filename of the given object."""
+    name = getattr(obj, "name", None)
+    if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
+        return os.path.basename(name)
+
+
+def extract_zipped_paths(path):
+    """Replace nonexistent paths that look like they refer to a member of a zip
+    archive with the location of an extracted copy of the target, or else
+    just return the provided path unchanged.
+    """
+    if os.path.exists(path):
+        # this is already a valid path, no need to do anything further
+        return path
+
+    # find the first valid part of the provided path and treat that as a zip archive
+    # assume the rest of the path is the name of a member in the archive
+    archive, member = os.path.split(path)
+    while archive and not os.path.exists(archive):
+        archive, prefix = os.path.split(archive)
+        if not prefix:
+            # If we don't check for an empty prefix after the split (in other
+            # words, archive remains unchanged after the split), we _can_ end
+            # up in an infinite loop on a rare corner case affecting a small
+            # number of users
+            break
+        member = "/".join([prefix, member])
+
+    if not zipfile.is_zipfile(archive):
+        return path
+
+    zip_file = zipfile.ZipFile(archive)
+    if member not in zip_file.namelist():
+        return path
+
+    # we have a valid zip archive and a valid member of that archive
+    tmp = tempfile.gettempdir()
+    extracted_path = os.path.join(tmp, member.split("/")[-1])
+    if not os.path.exists(extracted_path):
+        # use read + write to avoid creating nested folders; we only want the
+        # file, and this avoids a mkdir race condition
+        with atomic_open(extracted_path) as file_handler:
+            file_handler.write(zip_file.read(member))
+    return extracted_path
+
+
+@contextlib.contextmanager
+def atomic_open(filename):
+    """Write a file to the disk in an atomic fashion"""
+    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
+    try:
+        with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
+            yield tmp_handler
+        os.replace(tmp_name, filename)
+    except BaseException:
+        os.remove(tmp_name)
+        raise
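
Because the temporary file is created in the destination directory, os.replace stays on one filesystem and the swap is atomic: readers see either the old file or the complete new one. A sketch with a hypothetical target path:

import os
import tempfile
from requests.utils import atomic_open

target = os.path.join(tempfile.gettempdir(), "settings.json")  # hypothetical
with atomic_open(target) as fh:
    fh.write(b'{"retries": 3}')
with open(target, "rb") as fh:
    print(fh.read())  # b'{"retries": 3}'
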
+
+
+def from_key_val_list(value):
+    """Take an object and test to see if it can be represented as a
+    dictionary. If it can be, return an OrderedDict, e.g.,
+
+    ::
+
+        >>> from_key_val_list([('key', 'val')])
+        OrderedDict([('key', 'val')])
+        >>> from_key_val_list('string')
+        Traceback (most recent call last):
+        ...
+        ValueError: cannot encode objects that are not 2-tuples
+        >>> from_key_val_list({'key': 'val'})
+        OrderedDict([('key', 'val')])
+
+    :rtype: OrderedDict
+    """
+    if value is None:
+        return None
+
+    if isinstance(value, (str, bytes, bool, int)):
+        raise ValueError("cannot encode objects that are not 2-tuples")
+
+    return OrderedDict(value)
+
+
+def to_key_val_list(value):
+    """Take an object and test to see if it can be represented as a
+    dictionary. If it can be, return a list of tuples, e.g.,
+
+    ::
+
+        >>> to_key_val_list([('key', 'val')])
+        [('key', 'val')]
+        >>> to_key_val_list({'key': 'val'})
+        [('key', 'val')]
+        >>> to_key_val_list('string')
+        Traceback (most recent call last):
+        ...
+        ValueError: cannot encode objects that are not 2-tuples
+
+    :rtype: list
+    """
+    if value is None:
+        return None
+
+    if isinstance(value, (str, bytes, bool, int)):
+        raise ValueError("cannot encode objects that are not 2-tuples")
+
+    if isinstance(value, Mapping):
+        value = value.items()
+
+    return list(value)
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_list_header(value):
+    """Parse lists as described by RFC 2068 Section 2.
+
+    In particular, parse comma-separated lists where the elements of
+    the list may include quoted-strings. A quoted-string could
+    contain a comma. A non-quoted string could have quotes in the
+    middle. Quotes are removed automatically after parsing.
+
+    It basically works like :func:`parse_set_header` just that items
+    may appear multiple times and case sensitivity is preserved.
+
+    The return value is a standard :class:`list`:
+
+    >>> parse_list_header('token, "quoted value"')
+    ['token', 'quoted value']
+
+    To create a header from the :class:`list` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a list header.
+    :return: :class:`list`
+    :rtype: list
+    """
+    result = []
+    for item in _parse_list_header(value):
+        if item[:1] == item[-1:] == '"':
+            item = unquote_header_value(item[1:-1])
+        result.append(item)
+    return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_dict_header(value):
+    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+    convert them into a python dict:
+
+    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+    >>> type(d) is dict
+    True
+    >>> sorted(d.items())
+    [('bar', 'as well'), ('foo', 'is a fish')]
+
+    If there is no value for a key it will be `None`:
+
+    >>> parse_dict_header('key_without_value')
+    {'key_without_value': None}
+
+    To create a header from the :class:`dict` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a dict header.
+    :return: :class:`dict`
+    :rtype: dict
+    """
+    result = {}
+    for item in _parse_list_header(value):
+        if "=" not in item:
+            result[item] = None
+            continue
+        name, value = item.split("=", 1)
+        if value[:1] == value[-1:] == '"':
+            value = unquote_header_value(value[1:-1])
+        result[name] = value
+    return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def unquote_header_value(value, is_filename=False):
+    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
+    This does not use the real unquoting but what browsers are actually
+    using for quoting.
+
+    :param value: the header value to unquote.
+    :rtype: str
+    """
+    if value and value[0] == value[-1] == '"':
+        # this is not the real unquoting, but fixing this so that the
+        # RFC is met will result in bugs with internet explorer and
+        # probably some other browsers as well.  IE for example is
+        # uploading files with "C:\foo\bar.txt" as filename
+        value = value[1:-1]
+
+        # if this is a filename and the starting characters look like
+        # a UNC path, then just return the value without quotes.  Using the
+        # replace sequence below on a UNC path has the effect of turning
+        # the leading double slash into a single slash and then
+        # _fix_ie_filename() doesn't work correctly.  See #458.
+        if not is_filename or value[:2] != "\\\\":
+            return value.replace("\\\\", "\\").replace('\\"', '"')
+    return value
+
+
+def dict_from_cookiejar(cj):
+    """Returns a key/value dictionary from a CookieJar.
+
+    :param cj: CookieJar object to extract cookies from.
+    :rtype: dict
+    """
+    cookie_dict = {}
+
+    for cookie in cj:
+        cookie_dict[cookie.name] = cookie.value
+
+    return cookie_dict
+
+
+def add_dict_to_cookiejar(cj, cookie_dict):
+    """Returns a CookieJar from a key/value dictionary.
+
+    :param cj: CookieJar to insert cookies into.
+    :param cookie_dict: Dict of key/values to insert into CookieJar.
+    :rtype: CookieJar
+    """
+    return cookiejar_from_dict(cookie_dict, cj)
+
+
+def get_encodings_from_content(content):
+    """Returns encodings from given content string.
+
+    :param content: bytestring to extract encodings from.
+    """
+    warnings.warn(
+        (
+            "In requests 3.0, get_encodings_from_content will be removed. For "
+            "more information, please see the discussion on issue #2266. (This"
+            " warning should only appear once.)"
+        ),
+        DeprecationWarning,
+    )
+
+    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
+
+    return (
+        charset_re.findall(content)
+        + pragma_re.findall(content)
+        + xml_re.findall(content)
+    )
+
+
+def _parse_content_type_header(header):
+    """Returns content type and parameters from given header
+
+    :param header: string
+    :return: tuple containing content type and dictionary of
+        parameters
+    """
+    tokens = header.split(";")
+    content_type, params = tokens[0].strip(), tokens[1:]
+    params_dict = {}
+    items_to_strip = "\"' "
+
+    for param in params:
+        param = param.strip()
+        if param:
+            key, value = param, True
+            index_of_equals = param.find("=")
+            if index_of_equals != -1:
+                key = param[:index_of_equals].strip(items_to_strip)
+                value = param[index_of_equals + 1 :].strip(items_to_strip)
+            params_dict[key.lower()] = value
+    return content_type, params_dict
+
+
+def get_encoding_from_headers(headers):
+    """Returns encodings from given HTTP Header Dict.
+
+    :param headers: dictionary to extract encoding from.
+    :rtype: str
+    """
+    content_type = headers.get("content-type")
+
+    if not content_type:
+        return None
+
+    content_type, params = _parse_content_type_header(content_type)
+
+    if "charset" in params:
+        return params["charset"].strip("'\"")
+
+    if "text" in content_type:
+        return "ISO-8859-1"
+
+    if "application/json" in content_type:
+        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
+        return "utf-8"
+
+
+def stream_decode_response_unicode(iterator, r):
+    """Stream decodes an iterator."""
+
+    if r.encoding is None:
+        yield from iterator
+        return
+
+    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
+    for chunk in iterator:
+        rv = decoder.decode(chunk)
+        if rv:
+            yield rv
+    rv = decoder.decode(b"", final=True)
+    if rv:
+        yield rv
+
+
+def iter_slices(string, slice_length):
+    """Iterate over slices of a string."""
+    pos = 0
+    if slice_length is None or slice_length <= 0:
+        slice_length = len(string)
+    while pos < len(string):
+        yield string[pos : pos + slice_length]
+        pos += slice_length
+
+
+def get_unicode_from_response(r):
+    """Returns the requested content back in unicode.
+
+    :param r: Response object to get unicode content from.
+
+    Tried:
+
+    1. charset from content-type
+    2. fall back and replace all unicode characters
+
+    :rtype: str
+    """
+    warnings.warn(
+        (
+            "In requests 3.0, get_unicode_from_response will be removed. For "
+            "more information, please see the discussion on issue #2266. (This"
+            " warning should only appear once.)"
+        ),
+        DeprecationWarning,
+    )
+
+    tried_encodings = []
+
+    # Try charset from content-type
+    encoding = get_encoding_from_headers(r.headers)
+
+    if encoding:
+        try:
+            return str(r.content, encoding)
+        except UnicodeError:
+            tried_encodings.append(encoding)
+
+    # Fall back:
+    try:
+        return str(r.content, encoding, errors="replace")
+    except TypeError:
+        return r.content
+
+
+# The unreserved URI characters (RFC 3986)
+UNRESERVED_SET = frozenset(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
+)
+
+
+def unquote_unreserved(uri):
+    """Un-escape any percent-escape sequences in a URI that are unreserved
+    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
+
+    :rtype: str
+    """
+    parts = uri.split("%")
+    for i in range(1, len(parts)):
+        h = parts[i][0:2]
+        if len(h) == 2 and h.isalnum():
+            try:
+                c = chr(int(h, 16))
+            except ValueError:
+                raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
+
+            if c in UNRESERVED_SET:
+                parts[i] = c + parts[i][2:]
+            else:
+                parts[i] = f"%{parts[i]}"
+        else:
+            parts[i] = f"%{parts[i]}"
+    return "".join(parts)
+
+
+def requote_uri(uri):
+    """Re-quote the given URI.
+
+    This function passes the given URI through an unquote/quote cycle to
+    ensure that it is fully and consistently quoted.
+
+    :rtype: str
+    """
+    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+    try:
+        # Unquote only the unreserved characters
+        # Then quote only illegal characters (do not quote reserved,
+        # unreserved, or '%')
+        return quote(unquote_unreserved(uri), safe=safe_with_percent)
+    except InvalidURL:
+        # We couldn't unquote the given URI, so let's try quoting it, but
+        # there may be unquoted '%'s in the URI. We need to make sure they're
+        # properly quoted so they do not cause issues elsewhere.
+        return quote(uri, safe=safe_without_percent)
+
+
+def address_in_network(ip, net):
+    """This function allows you to check if an IP belongs to a network subnet
+
+    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
+             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
+
+    :rtype: bool
+    """
+    ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
+    netaddr, bits = net.split("/")
+    netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
+    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
+    return (ipaddr & netmask) == (network & netmask)
+
+
+def dotted_netmask(mask):
+    """Converts mask from /xx format to xxx.xxx.xxx.xxx
+
+    Example: if mask is 24 function returns 255.255.255.0
+
+    :rtype: str
+    """
+    bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
+    return socket.inet_ntoa(struct.pack(">I", bits))
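
These two helpers back the CIDR handling in the NO_PROXY logic further down; a sketch of the mask arithmetic:

from requests.utils import address_in_network, dotted_netmask

print(dotted_netmask(20))                                   # 255.255.240.0 (top 20 bits set)
print(address_in_network("192.168.1.1", "192.168.0.0/16"))  # True
print(address_in_network("172.16.0.9", "192.168.0.0/16"))   # False
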
+
+
+def is_ipv4_address(string_ip):
+    """
+    :rtype: bool
+    """
+    try:
+        socket.inet_aton(string_ip)
+    except OSError:
+        return False
+    return True
+
+
+def is_valid_cidr(string_network):
+    """
+    Very simple check of the cidr format in no_proxy variable.
+
+    :rtype: bool
+    """
+    if string_network.count("/") == 1:
+        try:
+            mask = int(string_network.split("/")[1])
+        except ValueError:
+            return False
+
+        if mask < 1 or mask > 32:
+            return False
+
+        try:
+            socket.inet_aton(string_network.split("/")[0])
+        except OSError:
+            return False
+    else:
+        return False
+    return True
+
+
+@contextlib.contextmanager
+def set_environ(env_name, value):
+    """Set the environment variable 'env_name' to 'value'
+
+    Save previous value, yield, and then restore the previous value stored in
+    the environment variable 'env_name'.
+
+    If 'value' is None, do nothing"""
+    value_changed = value is not None
+    if value_changed:
+        old_value = os.environ.get(env_name)
+        os.environ[env_name] = value
+    try:
+        yield
+    finally:
+        if value_changed:
+            if old_value is None:
+                del os.environ[env_name]
+            else:
+                os.environ[env_name] = old_value
+
+
+def should_bypass_proxies(url, no_proxy):
+    """
+    Returns whether we should bypass proxies or not.
+
+    :rtype: bool
+    """
+    # Prioritize lowercase environment variables over uppercase
+    # to keep a consistent behaviour with other http projects (curl, wget).
+    def get_proxy(key):
+        return os.environ.get(key) or os.environ.get(key.upper())
+
+    # First check whether no_proxy is defined. If it is, check that the URL
+    # we're getting isn't in the no_proxy list.
+    no_proxy_arg = no_proxy
+    if no_proxy is None:
+        no_proxy = get_proxy("no_proxy")
+    parsed = urlparse(url)
+
+    if parsed.hostname is None:
+        # URLs don't always have hostnames, e.g. file:/// urls.
+        return True
+
+    if no_proxy:
+        # We need to check whether we match here. We need to see if we match
+        # the end of the hostname, both with and without the port.
+        no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
+
+        if is_ipv4_address(parsed.hostname):
+            for proxy_ip in no_proxy:
+                if is_valid_cidr(proxy_ip):
+                    if address_in_network(parsed.hostname, proxy_ip):
+                        return True
+                elif parsed.hostname == proxy_ip:
+                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
+                    # matches the IP of the index
+                    return True
+        else:
+            host_with_port = parsed.hostname
+            if parsed.port:
+                host_with_port += f":{parsed.port}"
+
+            for host in no_proxy:
+                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
+                    # The URL does match something in no_proxy, so we don't want
+                    # to apply the proxies on this URL.
+                    return True
+
+    with set_environ("no_proxy", no_proxy_arg):
+        # parsed.hostname can be `None` in cases such as a file URI.
+        try:
+            bypass = proxy_bypass(parsed.hostname)
+        except (TypeError, socket.gaierror):
+            bypass = False
+
+    if bypass:
+        return True
+
+    return False
+
+
+def get_environ_proxies(url, no_proxy=None):
+    """
+    Return a dict of environment proxies.
+
+    :rtype: dict
+    """
+    if should_bypass_proxies(url, no_proxy=no_proxy):
+        return {}
+    else:
+        return getproxies()
+
+
+def select_proxy(url, proxies):
+    """Select a proxy for the url, if applicable.
+
+    :param url: The url of the request
+    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
+    """
+    proxies = proxies or {}
+    urlparts = urlparse(url)
+    if urlparts.hostname is None:
+        return proxies.get(urlparts.scheme, proxies.get("all"))
+
+    proxy_keys = [
+        urlparts.scheme + "://" + urlparts.hostname,
+        urlparts.scheme,
+        "all://" + urlparts.hostname,
+        "all",
+    ]
+    proxy = None
+    for proxy_key in proxy_keys:
+        if proxy_key in proxies:
+            proxy = proxies[proxy_key]
+            break
+
+    return proxy
+
+
+def resolve_proxies(request, proxies, trust_env=True):
+    """This method takes proxy information from a request and configuration
+    input to resolve a mapping of target proxies. This will consider settings
+    such as NO_PROXY to strip proxy configurations.
+
+    :param request: Request or PreparedRequest
+    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
+    :param trust_env: Boolean declaring whether to trust environment configs
+
+    :rtype: dict
+    """
+    proxies = proxies if proxies is not None else {}
+    url = request.url
+    scheme = urlparse(url).scheme
+    no_proxy = proxies.get("no_proxy")
+    new_proxies = proxies.copy()
+
+    if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
+        environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+
+        proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
+
+        if proxy:
+            new_proxies.setdefault(scheme, proxy)
+    return new_proxies
+
+
+def default_user_agent(name="python-requests"):
+    """
+    Return a string representing the default user agent.
+
+    :rtype: str
+    """
+    return f"{name}/{__version__}"
+
+
+def default_headers():
+    """
+    :rtype: requests.structures.CaseInsensitiveDict
+    """
+    return CaseInsensitiveDict(
+        {
+            "User-Agent": default_user_agent(),
+            "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
+            "Accept": "*/*",
+            "Connection": "keep-alive",
+        }
+    )
+
+
+def parse_header_links(value):
+    """Return a list of parsed link headers, as dicts.
+
+    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
+
+    :rtype: list
+    """
+    links = []
+
+    replace_chars = " '\""
+
+    value = value.strip(replace_chars)
+    if not value:
+        return links
+
+    for val in re.split(", *<", value):
+        try:
+            url, params = val.split(";", 1)
+        except ValueError:
+            url, params = val, ""
+
+        link = {"url": url.strip("<> '\"")}
+
+        for param in params.split(";"):
+            try:
+                key, value = param.split("=")
+            except ValueError:
+                break
+
+            link[key.strip(replace_chars)] = value.strip(replace_chars)
+
+        links.append(link)
+
+    return links
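
This is the parser behind Response.links; pointing it at an RFC 5988-style pagination header (the API URL below is made up) yields one dict per link:

from requests.utils import parse_header_links

header = (
    '<https://api.example.com/items?page=2>; rel="next", '
    '<https://api.example.com/items?page=9>; rel="last"'
)
for link in parse_header_links(header):
    print(link["rel"], "->", link["url"])
# next -> https://api.example.com/items?page=2
# last -> https://api.example.com/items?page=9
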
+
+
+# Null bytes; no need to recreate these on each call to guess_json_utf
+_null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
+_null2 = _null * 2
+_null3 = _null * 3
+
+
+def guess_json_utf(data):
+    """
+    :rtype: str
+    """
+    # JSON always starts with two ASCII characters, so detection is as
+    # easy as counting the nulls and from their location and count
+    # determine the encoding. Also detect a BOM, if present.
+    sample = data[:4]
+    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
+        return "utf-32"  # BOM included
+    if sample[:3] == codecs.BOM_UTF8:
+        return "utf-8-sig"  # BOM included, MS style (discouraged)
+    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+        return "utf-16"  # BOM included
+    nullcount = sample.count(_null)
+    if nullcount == 0:
+        return "utf-8"
+    if nullcount == 2:
+        if sample[::2] == _null2:  # 1st and 3rd are null
+            return "utf-16-be"
+        if sample[1::2] == _null2:  # 2nd and 4th are null
+            return "utf-16-le"
+        # Did not detect 2 valid UTF-16 ascii-range characters
+    if nullcount == 3:
+        if sample[:3] == _null3:
+            return "utf-32-be"
+        if sample[1:] == _null3:
+            return "utf-32-le"
+        # Did not detect a valid UTF-32 ascii-range character
+    return None
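
The null-count heuristic above identifies the UTF flavor of BOM-less JSON because the first two characters are always ASCII; a sketch:

from requests.utils import guess_json_utf

payload = '{"ok": true}'
for enc in ("utf-8", "utf-16-le", "utf-32-be"):
    print(enc, "->", guess_json_utf(payload.encode(enc)))
# utf-8 -> utf-8
# utf-16-le -> utf-16-le
# utf-32-be -> utf-32-be
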
985
+
986
+
987
+ def prepend_scheme_if_needed(url, new_scheme):
988
+ """Given a URL that may or may not have a scheme, prepend the given scheme.
989
+ Does not replace a present scheme with the one provided as an argument.
990
+
991
+ :rtype: str
992
+ """
993
+ parsed = parse_url(url)
994
+ scheme, auth, host, port, path, query, fragment = parsed
995
+
996
+ # A defect in urlparse determines that there isn't a netloc present in some
997
+ # urls. We previously assumed parsing was overly cautious, and swapped the
998
+ # netloc and path. Due to a lack of tests on the original defect, this is
999
+ # maintained with parse_url for backwards compatibility.
1000
+ netloc = parsed.netloc
1001
+ if not netloc:
1002
+ netloc, path = path, netloc
1003
+
1004
+ if auth:
1005
+ # parse_url doesn't provide the netloc with auth
1006
+ # so we'll add it ourselves.
1007
+ netloc = "@".join([auth, netloc])
1008
+ if scheme is None:
1009
+ scheme = new_scheme
1010
+ if path is None:
1011
+ path = ""
1012
+
1013
+ return urlunparse((scheme, netloc, path, "", query, fragment))
1014
+
1015
+
1016
+ def get_auth_from_url(url):
1017
+ """Given a url with authentication components, extract them into a tuple of
1018
+ username,password.
1019
+
1020
+ :rtype: (str,str)
1021
+ """
1022
+ parsed = urlparse(url)
1023
+
1024
+ try:
1025
+ auth = (unquote(parsed.username), unquote(parsed.password))
1026
+ except (AttributeError, TypeError):
1027
+ auth = ("", "")
1028
+
1029
+ return auth
1030
+
1031
+
1032
+ def check_header_validity(header):
1033
+ """Verifies that header parts don't contain leading whitespace
1034
+ reserved characters, or return characters.
1035
+
1036
+ :param header: tuple, in the format (name, value).
1037
+ """
1038
+ name, value = header
1039
+ _validate_header_part(header, name, 0)
1040
+ _validate_header_part(header, value, 1)
1041
+
1042
+
1043
+ def _validate_header_part(header, header_part, header_validator_index):
1044
+ if isinstance(header_part, str):
1045
+ validator = _HEADER_VALIDATORS_STR[header_validator_index]
1046
+ elif isinstance(header_part, bytes):
1047
+ validator = _HEADER_VALIDATORS_BYTE[header_validator_index]
1048
+ else:
1049
+ raise InvalidHeader(
1050
+ f"Header part ({header_part!r}) from {header} "
1051
+ f"must be of type str or bytes, not {type(header_part)}"
1052
+ )
1053
+
1054
+ if not validator.match(header_part):
1055
+ header_kind = "name" if header_validator_index == 0 else "value"
1056
+ raise InvalidHeader(
1057
+ f"Invalid leading whitespace, reserved character(s), or return"
1058
+ f"character(s) in header {header_kind}: {header_part!r}"
1059
+ )
1060
+
1061
+
1062
+ def urldefragauth(url):
1063
+ """
1064
+ Given a url remove the fragment and the authentication part.
1065
+
1066
+ :rtype: str
1067
+ """
1068
+ scheme, netloc, path, params, query, fragment = urlparse(url)
1069
+
1070
+ # see func:`prepend_scheme_if_needed`
1071
+ if not netloc:
1072
+ netloc, path = path, netloc
1073
+
1074
+ netloc = netloc.rsplit("@", 1)[-1]
1075
+
1076
+ return urlunparse((scheme, netloc, path, params, query, ""))
1077
+
1078
+
1079
+ def rewind_body(prepared_request):
1080
+ """Move file pointer back to its recorded starting position
1081
+ so it can be read again on redirect.
1082
+ """
1083
+ body_seek = getattr(prepared_request.body, "seek", None)
1084
+ if body_seek is not None and isinstance(
1085
+ prepared_request._body_position, integer_types
1086
+ ):
1087
+ try:
1088
+ body_seek(prepared_request._body_position)
1089
+ except OSError:
1090
+ raise UnrewindableBodyError(
1091
+ "An error occurred when rewinding request body for redirect."
1092
+ )
1093
+ else:
1094
+ raise UnrewindableBodyError("Unable to rewind request body for redirect.")
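
For quick orientation, here is a small usage sketch of the URL helpers above. This snippet is illustrative only and not part of the diff; it assumes a stock requests install, where these helpers are reachable via requests.utils:

from requests.utils import get_auth_from_url, prepend_scheme_if_needed, urldefragauth

# Add a scheme only when one is missing.
print(prepend_scheme_if_needed("example.com/search", "http"))        # http://example.com/search
# Pull credentials out of the netloc.
print(get_auth_from_url("https://alice:s3cret@example.com/data"))    # ('alice', 's3cret')
# Strip both the fragment and the auth part.
print(urldefragauth("https://alice:s3cret@example.com/data#part2"))  # https://example.com/data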
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__init__.py ADDED
@@ -0,0 +1,76 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
+ from ...utils import OptionalDependencyNotAvailable
+
+
+ _import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
+     _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_dpt"] = [
+         "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DPTForDepthEstimation",
+         "DPTForSemanticSegmentation",
+         "DPTModel",
+         "DPTPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_dpt import DPTFeatureExtractor
+         from .image_processing_dpt import DPTImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_dpt import (
+             DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DPTForDepthEstimation,
+             DPTForSemanticSegmentation,
+             DPTModel,
+             DPTPreTrainedModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
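
As a side note, a minimal sketch of what this lazy-init pattern buys at import time (illustrative only, assuming transformers and torch are installed; nothing below is part of the diff):

# Importing the package is cheap: only configuration_dpt is materialized here.
from transformers.models.dpt import DPTConfig

config = DPTConfig()

# The torch-backed symbols are resolved lazily on first attribute access,
# which is when modeling_dpt actually gets imported.
from transformers.models.dpt import DPTForDepthEstimation

model = DPTForDepthEstimation(config)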
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/configuration_dpt.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dinov2_depth_to_hf.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_beit_to_hf.cpython-310.pyc ADDED
Binary file (9.38 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_hybrid_to_pytorch.cpython-310.pyc ADDED
Binary file (8.54 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_swinv2_to_hf.cpython-310.pyc ADDED
Binary file (9.65 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/convert_dpt_to_pytorch.cpython-310.pyc ADDED
Binary file (8.08 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/feature_extraction_dpt.cpython-310.pyc ADDED
Binary file (993 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/image_processing_dpt.cpython-310.pyc ADDED
Binary file (18.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/__pycache__/modeling_dpt.cpython-310.pyc ADDED
Binary file (42.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/configuration_dpt.py ADDED
@@ -0,0 +1,284 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ DPT model configuration"""
+
+ import copy
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+ from ..auto.configuration_auto import CONFIG_MAPPING
+ from ..bit import BitConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DPTConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DPTModel`]. It is used to instantiate a DPT
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+     the defaults will yield a similar configuration to that of the DPT
+     [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         image_size (`int`, *optional*, defaults to 384):
+             The size (resolution) of each image.
+         patch_size (`int`, *optional*, defaults to 16):
+             The size (resolution) of each patch.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         is_hybrid (`bool`, *optional*, defaults to `False`):
+             Whether to use a hybrid backbone. Useful in the context of loading DPT-Hybrid models.
+         qkv_bias (`bool`, *optional*, defaults to `True`):
+             Whether to add a bias to the queries, keys and values.
+         backbone_out_indices (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
+             Indices of the intermediate hidden states to use from backbone.
+         readout_type (`str`, *optional*, defaults to `"project"`):
+             The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of
+             the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`].
+
+             - "ignore" simply ignores the CLS token.
+             - "add" passes the information from the CLS token to all other tokens by adding the representations.
+             - "project" passes information to the other tokens by concatenating the readout to all other tokens before
+               projecting the representation to the original feature dimension D using a linear layer followed by a
+               GELU non-linearity.
+         reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
+             The up/downsampling factors of the reassemble layers.
+         neck_hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
+             The hidden sizes to project to for the feature maps of the backbone.
+         fusion_hidden_size (`int`, *optional*, defaults to 256):
+             The number of channels before fusion.
+         head_in_index (`int`, *optional*, defaults to -1):
+             The index of the features to use in the heads.
+         use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`):
+             Whether to use batch normalization in the pre-activate residual units of the fusion blocks.
+         use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`):
+             Whether to use bias in the pre-activate residual units of the fusion blocks.
+         add_projection (`bool`, *optional*, defaults to `False`):
+             Whether to add a projection layer before the depth estimation head.
+         use_auxiliary_head (`bool`, *optional*, defaults to `True`):
+             Whether to use an auxiliary head during training.
+         auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
+             Weight of the cross-entropy loss of the auxiliary head.
+         semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
+             The index that is ignored by the loss function of the semantic segmentation model.
+         semantic_classifier_dropout (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the semantic classification head.
+         backbone_featmap_shape (`List[int]`, *optional*, defaults to `[1, 1024, 24, 24]`):
+             Used only for the `hybrid` embedding type. The shape of the feature maps of the backbone.
+         neck_ignore_stages (`List[int]`, *optional*, defaults to `[0, 1]`):
+             Used only for the `hybrid` embedding type. The stages of the readout layers to ignore.
+         backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):
+             The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to
+             leverage the [`AutoBackbone`] API.
+         backbone (`str`, *optional*):
+             Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+             will load the corresponding pretrained weights from the timm or transformers library. If
+             `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
+             backbone with random weights.
+         use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
+             Whether to use pretrained weights for the backbone.
+         use_timm_backbone (`bool`, *optional*, defaults to `False`):
+             Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
+             library.
+         backbone_kwargs (`dict`, *optional*):
+             Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+             e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+
+     Example:
+
+     ```python
+     >>> from transformers import DPTModel, DPTConfig
+
+     >>> # Initializing a DPT dpt-large style configuration
+     >>> configuration = DPTConfig()
+
+     >>> # Initializing a model from the dpt-large style configuration
+     >>> model = DPTModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "dpt"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.0,
+         attention_probs_dropout_prob=0.0,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         image_size=384,
+         patch_size=16,
+         num_channels=3,
+         is_hybrid=False,
+         qkv_bias=True,
+         backbone_out_indices=[2, 5, 8, 11],
+         readout_type="project",
+         reassemble_factors=[4, 2, 1, 0.5],
+         neck_hidden_sizes=[96, 192, 384, 768],
+         fusion_hidden_size=256,
+         head_in_index=-1,
+         use_batch_norm_in_fusion_residual=False,
+         use_bias_in_fusion_residual=None,
+         add_projection=False,
+         use_auxiliary_head=True,
+         auxiliary_loss_weight=0.4,
+         semantic_loss_ignore_index=255,
+         semantic_classifier_dropout=0.1,
+         backbone_featmap_shape=[1, 1024, 24, 24],
+         neck_ignore_stages=[0, 1],
+         backbone_config=None,
+         backbone=None,
+         use_pretrained_backbone=False,
+         use_timm_backbone=False,
+         backbone_kwargs=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.is_hybrid = is_hybrid
+
+         if use_pretrained_backbone:
+             raise ValueError("Pretrained backbones are not supported yet.")
+
+         use_autobackbone = False
+         if self.is_hybrid:
+             if backbone_config is None and backbone is None:
+                 logger.info("Initializing the config with a `BiT` backbone.")
+                 backbone_config = {
+                     "global_padding": "same",
+                     "layer_type": "bottleneck",
+                     "depths": [3, 4, 9],
+                     "out_features": ["stage1", "stage2", "stage3"],
+                     "embedding_dynamic_padding": True,
+                 }
+                 backbone_config = BitConfig(**backbone_config)
+             elif isinstance(backbone_config, dict):
+                 logger.info("Initializing the config with a `BiT` backbone.")
+                 backbone_config = BitConfig(**backbone_config)
+             elif isinstance(backbone_config, PretrainedConfig):
+                 backbone_config = backbone_config
+             else:
+                 raise ValueError(
+                     f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
+                 )
+             self.backbone_config = backbone_config
+             self.backbone_featmap_shape = backbone_featmap_shape
+             self.neck_ignore_stages = neck_ignore_stages
+
+             if readout_type != "project":
+                 raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
+
+         elif backbone_config is not None:
+             use_autobackbone = True
+
+             if isinstance(backbone_config, dict):
+                 backbone_model_type = backbone_config.get("model_type")
+                 config_class = CONFIG_MAPPING[backbone_model_type]
+                 backbone_config = config_class.from_dict(backbone_config)
+
+             self.backbone_config = backbone_config
+             self.backbone_featmap_shape = None
+             self.neck_ignore_stages = []
+         else:
+             self.backbone_config = backbone_config
+             self.backbone_featmap_shape = None
+             self.neck_ignore_stages = []
+
+         if use_autobackbone and backbone_config is not None and backbone is not None:
+             raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+         if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+             raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+         self.backbone = backbone
+         self.use_pretrained_backbone = use_pretrained_backbone
+         self.use_timm_backbone = use_timm_backbone
+         self.backbone_kwargs = backbone_kwargs
+         self.num_hidden_layers = None if use_autobackbone else num_hidden_layers
+         self.num_attention_heads = None if use_autobackbone else num_attention_heads
+         self.intermediate_size = None if use_autobackbone else intermediate_size
+         self.hidden_dropout_prob = None if use_autobackbone else hidden_dropout_prob
+         self.attention_probs_dropout_prob = None if use_autobackbone else attention_probs_dropout_prob
+         self.layer_norm_eps = None if use_autobackbone else layer_norm_eps
+         self.image_size = None if use_autobackbone else image_size
+         self.patch_size = None if use_autobackbone else patch_size
+         self.num_channels = None if use_autobackbone else num_channels
+         self.qkv_bias = None if use_autobackbone else qkv_bias
+         self.backbone_out_indices = None if use_autobackbone else backbone_out_indices
+
+         if readout_type not in ["ignore", "add", "project"]:
+             raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.readout_type = readout_type
+         self.reassemble_factors = reassemble_factors
+         self.neck_hidden_sizes = neck_hidden_sizes
+         self.fusion_hidden_size = fusion_hidden_size
+         self.head_in_index = head_in_index
+         self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
+         self.use_bias_in_fusion_residual = use_bias_in_fusion_residual
+         self.add_projection = add_projection
+
+         # auxiliary head attributes (semantic segmentation)
+         self.use_auxiliary_head = use_auxiliary_head
+         self.auxiliary_loss_weight = auxiliary_loss_weight
+         self.semantic_loss_ignore_index = semantic_loss_ignore_index
+         self.semantic_classifier_dropout = semantic_classifier_dropout
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+         """
+         output = copy.deepcopy(self.__dict__)
+
+         if output["backbone_config"] is not None:
+             output["backbone_config"] = self.backbone_config.to_dict()
+
+         output["model_type"] = self.__class__.model_type
+         return output
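
A short sketch of the hybrid default path and the to_dict() override above (illustrative only, not part of the diff; assumes transformers is installed):

from transformers import DPTConfig

# With is_hybrid=True and no backbone given, the config falls back to the BiT backbone built above.
hybrid = DPTConfig(is_hybrid=True)

d = hybrid.to_dict()
print(d["model_type"])                     # dpt
print(d["backbone_config"]["model_type"])  # bit  (nested config serialized as a plain dict)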
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_beit_to_hf.py ADDED
@@ -0,0 +1,306 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert DPT 3.1 checkpoints from the MiDaS repository. URL: https://github.com/isl-org/MiDaS"""
+
+
+ import argparse
+ from pathlib import Path
+
+ import requests
+ import torch
+ from PIL import Image
+
+ from transformers import BeitConfig, DPTConfig, DPTForDepthEstimation, DPTImageProcessor
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ def get_dpt_config(model_name):
+     hidden_size = 768
+     num_hidden_layers = 12
+     num_attention_heads = 12
+     intermediate_size = 3072
+     out_features = ["stage3", "stage6", "stage9", "stage12"]  # beit-base-384 uses [2, 5, 8, 11]
+
+     if "large" in model_name:
+         hidden_size = 1024
+         num_hidden_layers = 24
+         num_attention_heads = 16
+         intermediate_size = 4096
+         out_features = ["stage6", "stage12", "stage18", "stage24"]  # beit-large-512 uses [5, 11, 17, 23]
+
+     if "512" in model_name:
+         image_size = 512
+     elif "384" in model_name:
+         image_size = 384
+     else:
+         raise ValueError("Model not supported")
+
+     backbone_config = BeitConfig(
+         image_size=image_size,
+         num_hidden_layers=num_hidden_layers,
+         hidden_size=hidden_size,
+         intermediate_size=intermediate_size,
+         num_attention_heads=num_attention_heads,
+         use_relative_position_bias=True,
+         reshape_hidden_states=False,
+         out_features=out_features,
+     )
+
+     neck_hidden_sizes = [256, 512, 1024, 1024] if "large" in model_name else [96, 192, 384, 768]
+     config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes)
+
+     return config, image_size
+
+
+ # here we list all keys to be renamed (original name on the left, our name on the right)
+ def create_rename_keys(config):
+     rename_keys = []
+
+     # fmt: off
+     # stem
+     rename_keys.append(("pretrained.model.cls_token", "backbone.embeddings.cls_token"))
+     rename_keys.append(("pretrained.model.patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
+     rename_keys.append(("pretrained.model.patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
+
+     # Transformer encoder
+     for i in range(config.backbone_config.num_hidden_layers):
+         rename_keys.append((f"pretrained.model.blocks.{i}.gamma_1", f"backbone.encoder.layer.{i}.lambda_1"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.gamma_2", f"backbone.encoder.layer.{i}.lambda_2"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.layernorm_before.weight"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.layernorm_before.bias"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.layernorm_after.weight"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.layernorm_after.bias"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.intermediate.dense.weight"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.intermediate.dense.bias"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.output.dense.weight"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.output.dense.bias"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.attn.relative_position_bias_table", f"backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"))
+         rename_keys.append((f"pretrained.model.blocks.{i}.attn.relative_position_index", f"backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"))
+
+     # activation postprocessing (readout projections + resize blocks)
+     for i in range(4):
+         rename_keys.append((f"pretrained.act_postprocess{i+1}.0.project.0.weight", f"neck.reassemble_stage.readout_projects.{i}.0.weight"))
+         rename_keys.append((f"pretrained.act_postprocess{i+1}.0.project.0.bias", f"neck.reassemble_stage.readout_projects.{i}.0.bias"))
+
+         rename_keys.append((f"pretrained.act_postprocess{i+1}.3.weight", f"neck.reassemble_stage.layers.{i}.projection.weight"))
+         rename_keys.append((f"pretrained.act_postprocess{i+1}.3.bias", f"neck.reassemble_stage.layers.{i}.projection.bias"))
+
+         if i != 2:
+             rename_keys.append((f"pretrained.act_postprocess{i+1}.4.weight", f"neck.reassemble_stage.layers.{i}.resize.weight"))
+             rename_keys.append((f"pretrained.act_postprocess{i+1}.4.bias", f"neck.reassemble_stage.layers.{i}.resize.bias"))
+
+     # refinenet (tricky here)
+     mapping = {1: 3, 2: 2, 3: 1, 4: 0}
+
+     for i in range(1, 5):
+         j = mapping[i]
+         rename_keys.append((f"scratch.refinenet{i}.out_conv.weight", f"neck.fusion_stage.layers.{j}.projection.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.out_conv.bias", f"neck.fusion_stage.layers.{j}.projection.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias"))
+
+     # scratch convolutions
+     for i in range(4):
+         rename_keys.append((f"scratch.layer{i+1}_rn.weight", f"neck.convs.{i}.weight"))
+
+     # head
+     for i in range(0, 5, 2):
+         rename_keys.append((f"scratch.output_conv.{i}.weight", f"head.head.{i}.weight"))
+         rename_keys.append((f"scratch.output_conv.{i}.bias", f"head.head.{i}.bias"))
+
+     return rename_keys
+
+
+ def remove_ignore_keys_(state_dict):
+     ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
+     for k in ignore_keys:
+         state_dict.pop(k, None)
+
+
+ # we split up the matrix of each encoder layer into queries, keys and values
+ def read_in_q_k_v(state_dict, config):
+     hidden_size = config.backbone_config.hidden_size
+     for i in range(config.backbone_config.num_hidden_layers):
+         # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
+         # note: BEiT's attention has no key bias, so only q_bias and v_bias are copied over
+         in_proj_weight = state_dict.pop(f"pretrained.model.blocks.{i}.attn.qkv.weight")
+         q_bias = state_dict.pop(f"pretrained.model.blocks.{i}.attn.q_bias")
+         v_bias = state_dict.pop(f"pretrained.model.blocks.{i}.attn.v_bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :]
+         state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+         state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+             hidden_size : hidden_size * 2, :
+         ]
+         state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :]
+         state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+
+ def rename_key(dct, old, new):
+     val = dct.pop(old)
+     dct[new] = val
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
+     """
+     Copy/paste/tweak model's weights to our DPT structure.
+     """
+
+     name_to_url = {
+         "dpt-beit-large-512": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt",
+         "dpt-beit-large-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt",
+         "dpt-beit-base-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt",
+     }
+
+     # define DPT configuration based on URL
+     checkpoint_url = name_to_url[model_name]
+     config, image_size = get_dpt_config(model_name)
+     # load original state_dict from URL
+     state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+     # remove certain keys
+     remove_ignore_keys_(state_dict)
+     # rename keys
+     rename_keys = create_rename_keys(config)
+     for src, dest in rename_keys:
+         rename_key(state_dict, src, dest)
+     # read in qkv matrices
+     read_in_q_k_v(state_dict, config)
+
+     # load HuggingFace model
+     model = DPTForDepthEstimation(config)
+     missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
+     print("Missing keys:", missing_keys)
+     print("Unexpected keys:", unexpected_keys)
+     assert missing_keys == []
+     # assert unexpected_keys == ["pretrained.model.fc_norm.weight", "pretrained.model.fc_norm.bias"]
+     model.eval()
+
+     # Check outputs on an image
+     # We set `keep_aspect_ratio=False` as our current BEiT does not support arbitrary window sizes
+     processor = DPTImageProcessor(
+         size={"height": image_size, "width": image_size}, keep_aspect_ratio=False, ensure_multiple_of=32
+     )
+
+     image = prepare_img()
+     pixel_values = processor(image, return_tensors="pt").pixel_values
+
+     print("First values of pixel values:", pixel_values[0, 0, :3, :3])
+     print("Mean of pixel values:", pixel_values.mean().item())
+     print("Shape of pixel values:", pixel_values.shape)
+
+     import requests
+     from PIL import Image
+     from torchvision import transforms
+
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     image = Image.open(requests.get(url, stream=True).raw)
+
+     transforms = transforms.Compose(
+         [
+             transforms.Resize((image_size, image_size)),
+             transforms.ToTensor(),
+         ]
+     )
+     pixel_values = transforms(image).unsqueeze(0)
+
+     # forward pass
+     with torch.no_grad():
+         outputs = model(pixel_values)
+
+     predicted_depth = outputs.predicted_depth
+
+     print("Shape of predicted depth:", predicted_depth.shape)
+     print("First values of predicted depth:", predicted_depth[0, :3, :3])
+
+     # assert logits
+     # TODO there's still a small difference with the original logits
+     if model_name == "dpt-beit-large-512":
+         # OK, checked
+         expected_shape = torch.Size([1, 512, 512])
+         expected_slice = torch.tensor(
+             [[2804.6260, 2792.5708, 2812.9263], [2772.0288, 2780.1118, 2796.2529], [2748.1094, 2766.6558, 2766.9834]]
+         )
+     elif model_name == "dpt-beit-large-384":
+         # OK, checked
+         expected_shape = torch.Size([1, 384, 384])
+         expected_slice = torch.tensor(
+             [[1783.2273, 1780.5729, 1792.6453], [1759.9817, 1765.5359, 1778.5002], [1739.1633, 1754.7903, 1757.1990]],
+         )
+     elif model_name == "dpt-beit-base-384":
+         # OK, checked
+         expected_shape = torch.Size([1, 384, 384])
+         expected_slice = torch.tensor(
+             [[2898.4482, 2891.3750, 2904.8079], [2858.6685, 2877.2615, 2894.4507], [2842.1235, 2854.1023, 2861.6328]],
+         )
+
+     assert predicted_depth.shape == torch.Size(expected_shape)
+     assert torch.allclose(predicted_depth[0, :3, :3], expected_slice)
+     print("Looks ok!")
+
+     if pytorch_dump_folder_path is not None:
+         Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+         print(f"Saving model and processor to {pytorch_dump_folder_path}")
+         model.save_pretrained(pytorch_dump_folder_path)
+         processor.save_pretrained(pytorch_dump_folder_path)
+
+     if push_to_hub:
+         print("Pushing model and processor to hub...")
+         model.push_to_hub(repo_id=f"nielsr/{model_name}")
+         processor.push_to_hub(repo_id=f"nielsr/{model_name}")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--model_name",
+         default="dpt-beit-large-512",
+         type=str,
+         choices=["dpt-beit-large-512", "dpt-beit-large-384", "dpt-beit-base-384"],
+         help="Name of the model you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         help="Path to the output PyTorch model directory.",
+     )
+     parser.add_argument(
+         "--push_to_hub",
+         action="store_true",
+         help="Whether to push the model to the hub after conversion.",
+     )
+
+     args = parser.parse_args()
+     convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
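
The converter can also be driven programmatically rather than through argparse; a hedged sketch (the module path assumes this file's location inside the installed package, and the checkpoint download and logit checks still run):

from transformers.models.dpt.convert_dpt_beit_to_hf import convert_dpt_checkpoint

convert_dpt_checkpoint(
    model_name="dpt-beit-base-384",                   # one of the three names in name_to_url
    pytorch_dump_folder_path="./dpt-beit-base-384",   # where to save model + processor
    push_to_hub=False,
)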
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py ADDED
@@ -0,0 +1,316 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT"""
+
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import requests
+ import torch
+ from huggingface_hub import cached_download, hf_hub_url
+ from PIL import Image
+
+ from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ def get_dpt_config(checkpoint_url):
+     config = DPTConfig(embedding_type="hybrid")
+
+     if "large" in checkpoint_url:
+         config.hidden_size = 1024
+         config.intermediate_size = 4096
+         config.num_hidden_layers = 24
+         config.num_attention_heads = 16
+         config.backbone_out_indices = [5, 11, 17, 23]
+         config.neck_hidden_sizes = [256, 512, 1024, 1024]
+         expected_shape = (1, 384, 384)
+
+     if "nyu" in checkpoint_url or "midas" in checkpoint_url:
+         config.hidden_size = 768
+         config.reassemble_factors = [1, 1, 1, 0.5]
+         config.neck_hidden_sizes = [256, 512, 768, 768]
+         config.num_labels = 150
+         config.patch_size = 16
+         expected_shape = (1, 384, 384)
+         config.use_batch_norm_in_fusion_residual = False
+         config.readout_type = "project"
+
+     if "ade" in checkpoint_url:
+         config.use_batch_norm_in_fusion_residual = True
+         config.hidden_size = 768
+         config.reassemble_factors = [1, 1, 1, 0.5]
+         config.num_labels = 150
+         config.patch_size = 16
+         repo_id = "huggingface/label-files"
+         filename = "ade20k-id2label.json"
+         id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+         id2label = {int(k): v for k, v in id2label.items()}
+         config.id2label = id2label
+         config.label2id = {v: k for k, v in id2label.items()}
+         expected_shape = [1, 150, 480, 480]
+
+     return config, expected_shape
+
+
+ def remove_ignore_keys_(state_dict):
+     ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
+     for k in ignore_keys:
+         state_dict.pop(k, None)
+
+
+ def rename_key(name):
+     if (
+         "pretrained.model" in name
+         and "cls_token" not in name
+         and "pos_embed" not in name
+         and "patch_embed" not in name
+     ):
+         name = name.replace("pretrained.model", "dpt.encoder")
+     if "pretrained.model" in name:
+         name = name.replace("pretrained.model", "dpt.embeddings")
+     if "patch_embed" in name:
+         name = name.replace("patch_embed", "")
+     if "pos_embed" in name:
+         name = name.replace("pos_embed", "position_embeddings")
+     if "attn.proj" in name:
+         name = name.replace("attn.proj", "attention.output.dense")
+     if "proj" in name and "project" not in name:
+         name = name.replace("proj", "projection")
+     if "blocks" in name:
+         name = name.replace("blocks", "layer")
+     if "mlp.fc1" in name:
+         name = name.replace("mlp.fc1", "intermediate.dense")
+     if "mlp.fc2" in name:
+         name = name.replace("mlp.fc2", "output.dense")
+     if "norm1" in name and "backbone" not in name:
+         name = name.replace("norm1", "layernorm_before")
+     if "norm2" in name and "backbone" not in name:
+         name = name.replace("norm2", "layernorm_after")
+     if "scratch.output_conv" in name:
+         name = name.replace("scratch.output_conv", "head")
+     if "scratch" in name:
+         name = name.replace("scratch", "neck")
+     if "layer1_rn" in name:
+         name = name.replace("layer1_rn", "convs.0")
+     if "layer2_rn" in name:
+         name = name.replace("layer2_rn", "convs.1")
+     if "layer3_rn" in name:
+         name = name.replace("layer3_rn", "convs.2")
+     if "layer4_rn" in name:
+         name = name.replace("layer4_rn", "convs.3")
+     if "refinenet" in name:
+         layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
+         # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
+         name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
+     if "out_conv" in name:
+         name = name.replace("out_conv", "projection")
+     if "resConfUnit1" in name:
+         name = name.replace("resConfUnit1", "residual_layer1")
+     if "resConfUnit2" in name:
+         name = name.replace("resConfUnit2", "residual_layer2")
+     if "conv1" in name:
+         name = name.replace("conv1", "convolution1")
+     if "conv2" in name:
+         name = name.replace("conv2", "convolution2")
+     # readout blocks
+     if "pretrained.act_postprocess1.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
+     if "pretrained.act_postprocess2.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
+     if "pretrained.act_postprocess3.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
+     if "pretrained.act_postprocess4.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
+
+     # resize blocks
+     if "pretrained.act_postprocess1.3" in name:
+         name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
+     if "pretrained.act_postprocess1.4" in name:
+         name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
+     if "pretrained.act_postprocess2.3" in name:
+         name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
+     if "pretrained.act_postprocess2.4" in name:
+         name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
+     if "pretrained.act_postprocess3.3" in name:
+         name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
+     if "pretrained.act_postprocess4.3" in name:
+         name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
+     if "pretrained.act_postprocess4.4" in name:
+         name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
+     if "pretrained" in name:
+         name = name.replace("pretrained", "dpt")
+     if "bn" in name:
+         name = name.replace("bn", "batch_norm")
+     if "head" in name:
+         name = name.replace("head", "head.head")
+     if "encoder.norm" in name:
+         name = name.replace("encoder.norm", "layernorm")
+     if "auxlayer" in name:
+         name = name.replace("auxlayer", "auxiliary_head.head")
+     if "backbone" in name:
+         name = name.replace("backbone", "backbone.bit.encoder")
+
+     if ".." in name:
+         name = name.replace("..", ".")
+
+     if "stem.conv" in name:
+         name = name.replace("stem.conv", "bit.embedder.convolution")
+     if "blocks" in name:
+         name = name.replace("blocks", "layers")
+     if "convolution" in name and "backbone" in name:
+         name = name.replace("convolution", "conv")
+     if "layer" in name and "backbone" in name:
+         name = name.replace("layer", "layers")
+     if "backbone.bit.encoder.bit" in name:
+         name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
+     if "embedder.conv" in name:
+         name = name.replace("embedder.conv", "embedder.convolution")
+     if "backbone.bit.encoder.stem.norm" in name:
+         name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
+     return name
+
+
+ # we split up the matrix of each encoder layer into queries, keys and values
+ def read_in_q_k_v(state_dict, config):
+     for i in range(config.num_hidden_layers):
+         # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+         in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
+         in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+             config.hidden_size : config.hidden_size * 2, :
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+             config.hidden_size : config.hidden_size * 2
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+             -config.hidden_size :, :
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
+     """
+     Copy/paste/tweak model's weights to our DPT structure.
+     """
+
+     # define DPT configuration based on URL
+     config, expected_shape = get_dpt_config(checkpoint_url)
+     # load original state_dict from URL
+     # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+     state_dict = torch.load(checkpoint_url, map_location="cpu")
+     # remove certain keys
+     remove_ignore_keys_(state_dict)
+     # rename keys
+     for key in state_dict.copy().keys():
+         val = state_dict.pop(key)
+         state_dict[rename_key(key)] = val
+     # read in qkv matrices
+     read_in_q_k_v(state_dict, config)
+
+     # load HuggingFace model
+     model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
+     model.load_state_dict(state_dict)
+     model.eval()
+
+     # Check outputs on an image
+     size = 480 if "ade" in checkpoint_url else 384
+     image_processor = DPTImageProcessor(size=size)
+
+     image = prepare_img()
+     encoding = image_processor(image, return_tensors="pt")
+
+     # forward pass
+     outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
+
+     if show_prediction:
+         prediction = (
+             torch.nn.functional.interpolate(
+                 outputs.unsqueeze(1),
+                 size=(image.size[1], image.size[0]),
+                 mode="bicubic",
+                 align_corners=False,
+             )
+             .squeeze()
+             .cpu()
+             .numpy()
+         )
+
+         Image.fromarray((prediction / prediction.max()) * 255).show()
+
+     if pytorch_dump_folder_path is not None:
+         Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+         print(f"Saving model to {pytorch_dump_folder_path}")
+         model.save_pretrained(pytorch_dump_folder_path)
+         print(f"Saving image processor to {pytorch_dump_folder_path}")
+         image_processor.save_pretrained(pytorch_dump_folder_path)
+
+     if push_to_hub:
+         model.push_to_hub("ybelkada/dpt-hybrid-midas")
+         image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--checkpoint_url",
+         default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
+         type=str,
+         help="URL of the original DPT checkpoint you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         required=False,
+         help="Path to the output PyTorch model directory.",
+     )
+     parser.add_argument(
+         "--push_to_hub",
+         action="store_true",
+     )
+     parser.add_argument(
+         "--model_name",
+         default="dpt-large",
+         type=str,
+         help="Name of the model, in case you're pushing to the hub.",
+     )
+     parser.add_argument(
+         "--show_prediction",
+         action="store_true",
+     )
+
+     args = parser.parse_args()
+     convert_dpt_checkpoint(
+         args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
+     )
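
Because rename_key here rewrites checkpoint key strings one substitution at a time, it can be probed in isolation; a sketch (assuming the module's own imports, including huggingface_hub.cached_download, resolve in this environment):

from transformers.models.dpt.convert_dpt_hybrid_to_pytorch import rename_key

# refinenet indices are mirrored: 4 -> 0, 3 -> 1, 2 -> 2, 1 -> 3
print(rename_key("scratch.refinenet4.resConfUnit1.conv1.weight"))
# neck.fusion_stage.layers.0.residual_layer1.convolution1.weight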
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_swinv2_to_hf.py ADDED
@@ -0,0 +1,322 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DPT 3.1 checkpoints from the MiDaS repository. URL: https://github.com/isl-org/MiDaS"""
16
+
17
+
18
+ import argparse
19
+ from pathlib import Path
20
+
21
+ import requests
22
+ import torch
23
+ from PIL import Image
24
+
25
+ from transformers import DPTConfig, DPTForDepthEstimation, DPTImageProcessor, Swinv2Config
26
+ from transformers.utils import logging
27
+
28
+
29
+ logging.set_verbosity_info()
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ def get_dpt_config(model_name):
34
+ if "tiny" in model_name:
35
+ embed_dim = 96
36
+ depths = (2, 2, 6, 2)
37
+ num_heads = (3, 6, 12, 24)
38
+ window_size = 16
39
+ # note: for Swinv2-tiny authors used the window_size = 16 variant
40
+ # as seen here: https://github.com/isl-org/MiDaS/blob/bdc4ed64c095e026dc0a2f17cabb14d58263decb/midas/backbones/swin2.py#L26
41
+ pretrained_window_sizes = (0, 0, 0, 0)
42
+ elif "base" in model_name:
43
+ embed_dim = 128
44
+ depths = (2, 2, 18, 2)
45
+ num_heads = (4, 8, 16, 32)
46
+ window_size = 24
47
+ pretrained_window_sizes = (12, 12, 12, 6)
48
+ elif "large" in model_name:
49
+ embed_dim = 192
50
+ depths = (2, 2, 18, 2)
51
+ num_heads = (6, 12, 24, 48)
52
+ window_size = 24
53
+ pretrained_window_sizes = (12, 12, 12, 6)
54
+
55
+ if "384" in model_name:
56
+ image_size = 384
57
+ elif "256" in model_name:
58
+ image_size = 256
59
+ else:
60
+ raise ValueError("Model not supported, to do")
61
+
62
+ backbone_config = Swinv2Config(
63
+ image_size=image_size,
64
+ embed_dim=embed_dim,
65
+ depths=depths,
66
+ window_size=window_size,
67
+ pretrained_window_sizes=pretrained_window_sizes,
68
+ num_heads=num_heads,
69
+ out_features=["stage1", "stage2", "stage3", "stage4"],
70
+ )
71
+
72
+ if model_name == "dpt-swinv2-tiny-256":
73
+ neck_hidden_sizes = [96, 192, 384, 768]
74
+ elif model_name == "dpt-swinv2-base-384":
75
+ neck_hidden_sizes = [128, 256, 512, 1024]
76
+ elif model_name == "dpt-swinv2-large-384":
77
+ neck_hidden_sizes = [192, 384, 768, 1536]
78
+
79
+ config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes)
80
+
81
+ return config, image_size
82
+
83
+
84
+ # here we list all keys to be renamed (original name on the left, our name on the right)
85
+ def create_rename_keys(config):
86
+ rename_keys = []
87
+
88
+ # fmt: off
89
+ # stem
90
+ rename_keys.append(("pretrained.model.patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
91
+ rename_keys.append(("pretrained.model.patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
92
+ rename_keys.append(("pretrained.model.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
93
+ rename_keys.append(("pretrained.model.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
94
+
95
+ # transformer encoder
96
+ for i in range(len(config.backbone_config.depths)):
97
+ for j in range(config.backbone_config.depths[i]):
98
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.logit_scale", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.logit_scale"))
99
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.0.weight"))
100
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.0.bias"))
101
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.2.weight"))
102
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.q_bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"))
103
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.v_bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"))
104
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
105
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.attn.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
106
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
107
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
108
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.mlp.fc1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
109
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.mlp.fc1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
110
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.mlp.fc2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
111
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.mlp.fc2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
112
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
113
+ rename_keys.append((f"pretrained.model.layers.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
114
+
115
+ # downsample parameters
116
+ if i in [0,1,2]:
117
+ rename_keys.append((f"pretrained.model.layers.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
118
+ rename_keys.append((f"pretrained.model.layers.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
119
+ rename_keys.append((f"pretrained.model.layers.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
120
+
121
+ # note: non-Transformer backbones like Swinv2, LeViT et al don't require activation postprocessing (readout projections + resize blocks)
122
+
123
+ # refinenet (tricky here)
124
+ mapping = {1:3, 2:2, 3:1, 4:0}
+
+     for i in range(1, 5):
+         j = mapping[i]
+         rename_keys.append((f"scratch.refinenet{i}.out_conv.weight", f"neck.fusion_stage.layers.{j}.projection.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.out_conv.bias", f"neck.fusion_stage.layers.{j}.projection.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight"))
+         rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias"))
+
+     # scratch convolutions
+     for i in range(4):
+         rename_keys.append((f"scratch.layer{i+1}_rn.weight", f"neck.convs.{i}.weight"))
+
+     # head
+     for i in range(0, 5, 2):
+         rename_keys.append((f"scratch.output_conv.{i}.weight", f"head.head.{i}.weight"))
+         rename_keys.append((f"scratch.output_conv.{i}.bias", f"head.head.{i}.bias"))
+
+     return rename_keys
+
+
+ def remove_ignore_keys_(state_dict):
+     ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
+     for k in ignore_keys:
+         state_dict.pop(k, None)
+
+
+ # we split up the matrix of each encoder layer into queries, keys and values
+ def read_in_q_k_v(state_dict, config, model):
+     for i in range(len(config.backbone_config.depths)):
+         for j in range(config.backbone_config.depths[i]):
+             dim = model.backbone.encoder.layers[i].blocks[j].attention.self.all_head_size
+             # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
+             in_proj_weight = state_dict.pop(f"pretrained.model.layers.{i}.blocks.{j}.attn.qkv.weight")
+             # next, add query, keys and values (in that order) to the state dict
+             state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
+             state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
+                 dim : dim * 2, :
+             ]
+             state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
+                 -dim:, :
+             ]
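+             # note: the rows of the fused qkv matrix are stacked as [query; key; value]; Swinv2 attention has
+             # no key bias, which is why only q_bias and v_bias appear in the rename table above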
+
+
+ def rename_key(dct, old, new):
+     val = dct.pop(old)
+     dct[new] = val
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, verify_logits, push_to_hub):
+     """
+     Copy/paste/tweak model's weights to our DPT structure.
+     """
+
+     name_to_url = {
+         "dpt-swinv2-tiny-256": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt",
+         "dpt-swinv2-base-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt",
+         "dpt-swinv2-large-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt",
+     }
+
+     # define DPT configuration based on URL
+     checkpoint_url = name_to_url[model_name]
+     config, image_size = get_dpt_config(model_name)
+     # load original state_dict from URL
+     state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+
+     # load HuggingFace model
+     model = DPTForDepthEstimation(config)
+
+     # remove certain keys
+     remove_ignore_keys_(state_dict)
+     # rename keys
+     rename_keys = create_rename_keys(config)
+     for src, dest in rename_keys:
+         rename_key(state_dict, src, dest)
+     # read in qkv matrices
+     read_in_q_k_v(state_dict, config, model)
+
+     missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
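+     # strict=False because buffers the HF model creates itself (e.g. relative position tables) are absent
+     # from the original checkpoint; they are printed below as missing keys instead of raising an error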
+     print("Missing keys:", missing_keys)
+     print("Unexpected keys:", unexpected_keys)
+     model.eval()
+
+     # Check outputs on an image
+     processor = DPTImageProcessor(size={"height": image_size, "width": image_size})
+
+     image = prepare_img()
+     processor(image, return_tensors="pt")
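+     # the processor output is not used here: the call only smoke-tests preprocessing; the logits check
+     # below feeds raw (un-normalized) torchvision tensors to the model instead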
+
+     if verify_logits:
+         from torchvision import transforms
+
+         url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+         image = Image.open(requests.get(url, stream=True).raw)
+
+         transforms = transforms.Compose(
+             [
+                 transforms.Resize((image_size, image_size)),
+                 transforms.ToTensor(),
+             ]
+         )
+         pixel_values = transforms(image).unsqueeze(0)
+
+         # forward pass
+         with torch.no_grad():
+             outputs = model(pixel_values)
+
+         predicted_depth = outputs.predicted_depth
+
+         print("Shape of predicted depth:", predicted_depth.shape)
+         print("First values of predicted depth:", predicted_depth[0, :3, :3])
+
+         # assert logits
+         if model_name == "dpt-swinv2-base-384":
+             # OK, checked
+             expected_shape = torch.Size([1, 384, 384])
+             expected_slice = torch.tensor(
+                 [
+                     [1998.5575, 1997.3887, 2009.2981],
+                     [1952.8607, 1979.6488, 2001.0854],
+                     [1953.7697, 1961.7711, 1968.8904],
+                 ],
+             )
+         elif model_name == "dpt-swinv2-tiny-256":
+             # OK, checked
+             expected_shape = torch.Size([1, 256, 256])
+             expected_slice = torch.tensor(
+                 [[978.9163, 976.5215, 978.5349], [974.1859, 971.7249, 975.8046], [971.3419, 970.3118, 971.6830]],
+             )
+         elif model_name == "dpt-swinv2-large-384":
+             # OK, checked
+             expected_shape = torch.Size([1, 384, 384])
+             expected_slice = torch.tensor(
+                 [
+                     [1203.7206, 1200.1495, 1197.8234],
+                     [1196.2484, 1183.5033, 1186.4640],
+                     [1178.8131, 1182.3260, 1174.3975],
+                 ],
+             )
+
+         assert predicted_depth.shape == torch.Size(expected_shape)
+         assert torch.allclose(predicted_depth[0, :3, :3], expected_slice)
+         print("Looks ok!")
+
+     if pytorch_dump_folder_path is not None:
+         Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+         print(f"Saving model and processor to {pytorch_dump_folder_path}")
+         model.save_pretrained(pytorch_dump_folder_path)
+         processor.save_pretrained(pytorch_dump_folder_path)
+
+     if push_to_hub:
+         print("Pushing model and processor to hub...")
+         model.push_to_hub(repo_id=f"Intel/{model_name}")
+         processor.push_to_hub(repo_id=f"Intel/{model_name}")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--model_name",
+         default="dpt-swinv2-base-384",
+         type=str,
+         choices=["dpt-swinv2-tiny-256", "dpt-swinv2-base-384", "dpt-swinv2-large-384"],
+         help="Name of the model you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         help="Path to the output PyTorch model directory.",
+     )
+     parser.add_argument(
+         "--verify_logits",
+         action="store_true",
+         help="Whether to verify logits after conversion.",
+     )
+     parser.add_argument(
+         "--push_to_hub",
+         action="store_true",
+         help="Whether to push the model to the hub after conversion.",
+     )
+
+     args = parser.parse_args()
+     convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.verify_logits, args.push_to_hub)
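+     # example invocation (assuming this file is saved as convert_dpt_swinv2_to_hf.py):
+     #   python convert_dpt_swinv2_to_hf.py --model_name dpt-swinv2-tiny-256 --verify_logits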
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dpt_to_pytorch.py ADDED
@@ -0,0 +1,286 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT"""
+
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import requests
+ import torch
+ from huggingface_hub import cached_download, hf_hub_url
+ from PIL import Image
+
+ from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ def get_dpt_config(checkpoint_url):
+     config = DPTConfig()
+
+     if "large" in checkpoint_url:
+         config.hidden_size = 1024
+         config.intermediate_size = 4096
+         config.num_hidden_layers = 24
+         config.num_attention_heads = 16
+         config.backbone_out_indices = [5, 11, 17, 23]
+         config.neck_hidden_sizes = [256, 512, 1024, 1024]
+         expected_shape = (1, 384, 384)
+
+     if "ade" in checkpoint_url:
+         config.use_batch_norm_in_fusion_residual = True
+
+         config.num_labels = 150
+         repo_id = "huggingface/label-files"
+         filename = "ade20k-id2label.json"
+         id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+         id2label = {int(k): v for k, v in id2label.items()}
+         config.id2label = id2label
+         config.label2id = {v: k for k, v in id2label.items()}
+         expected_shape = [1, 150, 480, 480]
+
+     return config, expected_shape
+
+
+ def remove_ignore_keys_(state_dict):
+     ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
+     for k in ignore_keys:
+         state_dict.pop(k, None)
+
+
+ def rename_key(name):
+     if (
+         "pretrained.model" in name
+         and "cls_token" not in name
+         and "pos_embed" not in name
+         and "patch_embed" not in name
+     ):
+         name = name.replace("pretrained.model", "dpt.encoder")
+     if "pretrained.model" in name:
+         name = name.replace("pretrained.model", "dpt.embeddings")
+     if "patch_embed" in name:
+         name = name.replace("patch_embed", "patch_embeddings")
+     if "pos_embed" in name:
+         name = name.replace("pos_embed", "position_embeddings")
+     if "attn.proj" in name:
+         name = name.replace("attn.proj", "attention.output.dense")
+     if "proj" in name and "project" not in name:
+         name = name.replace("proj", "projection")
+     if "blocks" in name:
+         name = name.replace("blocks", "layer")
+     if "mlp.fc1" in name:
+         name = name.replace("mlp.fc1", "intermediate.dense")
+     if "mlp.fc2" in name:
+         name = name.replace("mlp.fc2", "output.dense")
+     if "norm1" in name:
+         name = name.replace("norm1", "layernorm_before")
+     if "norm2" in name:
+         name = name.replace("norm2", "layernorm_after")
+     if "scratch.output_conv" in name:
+         name = name.replace("scratch.output_conv", "head")
+     if "scratch" in name:
+         name = name.replace("scratch", "neck")
+     if "layer1_rn" in name:
+         name = name.replace("layer1_rn", "convs.0")
+     if "layer2_rn" in name:
+         name = name.replace("layer2_rn", "convs.1")
+     if "layer3_rn" in name:
+         name = name.replace("layer3_rn", "convs.2")
+     if "layer4_rn" in name:
+         name = name.replace("layer4_rn", "convs.3")
+     if "refinenet" in name:
+         layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
+         # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
+         name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
+     if "out_conv" in name:
+         name = name.replace("out_conv", "projection")
+     if "resConfUnit1" in name:
+         name = name.replace("resConfUnit1", "residual_layer1")
+     if "resConfUnit2" in name:
+         name = name.replace("resConfUnit2", "residual_layer2")
+     if "conv1" in name:
+         name = name.replace("conv1", "convolution1")
+     if "conv2" in name:
+         name = name.replace("conv2", "convolution2")
+     # readout blocks
+     if "pretrained.act_postprocess1.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
+     if "pretrained.act_postprocess2.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
+     if "pretrained.act_postprocess3.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
+     if "pretrained.act_postprocess4.0.project.0" in name:
+         name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
+     # resize blocks
+     if "pretrained.act_postprocess1.3" in name:
+         name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
+     if "pretrained.act_postprocess1.4" in name:
+         name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
+     if "pretrained.act_postprocess2.3" in name:
+         name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
+     if "pretrained.act_postprocess2.4" in name:
+         name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
+     if "pretrained.act_postprocess3.3" in name:
+         name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
+     if "pretrained.act_postprocess4.3" in name:
+         name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
+     if "pretrained.act_postprocess4.4" in name:
+         name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
+     if "pretrained" in name:
+         name = name.replace("pretrained", "dpt")
+     if "bn" in name:
+         name = name.replace("bn", "batch_norm")
+     if "head" in name:
+         name = name.replace("head", "head.head")
+     if "encoder.norm" in name:
+         name = name.replace("encoder.norm", "layernorm")
+     if "auxlayer" in name:
+         name = name.replace("auxlayer", "auxiliary_head.head")
+
+     return name
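+     # e.g. "pretrained.model.blocks.0.attn.proj.weight" is renamed to "dpt.encoder.layer.0.attention.output.dense.weight"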
+
+
+ # we split up the matrix of each encoder layer into queries, keys and values
+ def read_in_q_k_v(state_dict, config):
+     for i in range(config.num_hidden_layers):
+         # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+         in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
+         in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+             config.hidden_size : config.hidden_size * 2, :
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+             config.hidden_size : config.hidden_size * 2
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+             -config.hidden_size :, :
+         ]
+         state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
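+         # layout of the fused projection: rows [0, hidden_size) are the query, [hidden_size, 2 * hidden_size)
+         # the key, and the last hidden_size rows the value, hence the three slices above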
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
+     """
+     Copy/paste/tweak model's weights to our DPT structure.
+     """
+
+     # define DPT configuration based on URL
+     config, expected_shape = get_dpt_config(checkpoint_url)
+     # load original state_dict from URL
+     state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+     # remove certain keys
+     remove_ignore_keys_(state_dict)
+     # rename keys
+     for key in state_dict.copy().keys():
+         val = state_dict.pop(key)
+         state_dict[rename_key(key)] = val
+     # read in qkv matrices
+     read_in_q_k_v(state_dict, config)
+
+     # load HuggingFace model
+     model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
+     model.load_state_dict(state_dict)
+     model.eval()
+
+     # Check outputs on an image
+     size = 480 if "ade" in checkpoint_url else 384
+     image_processor = DPTImageProcessor(size=size)
+
+     image = prepare_img()
+     encoding = image_processor(image, return_tensors="pt")
+
+     # forward pass
+     outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
+
+     # Assert logits
+     expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
+     if "ade" in checkpoint_url:
+         expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
+     assert outputs.shape == torch.Size(expected_shape)
+     assert (
+         torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
+         if "ade" in checkpoint_url
+         else torch.allclose(outputs[0, :3, :3], expected_slice)
+     )
+     print("Looks ok!")
+
+     if pytorch_dump_folder_path is not None:
+         Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+         print(f"Saving model to {pytorch_dump_folder_path}")
+         model.save_pretrained(pytorch_dump_folder_path)
+         print(f"Saving image processor to {pytorch_dump_folder_path}")
+         image_processor.save_pretrained(pytorch_dump_folder_path)
+
+     if push_to_hub:
+         print("Pushing model to hub...")
+         model.push_to_hub(
+             repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+             organization="nielsr",
+             commit_message="Add model",
+             use_temp_dir=True,
+         )
+         image_processor.push_to_hub(
+             repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+             organization="nielsr",
+             commit_message="Add image processor",
+             use_temp_dir=True,
+         )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--checkpoint_url",
+         default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
+         type=str,
+         help="URL of the original DPT checkpoint you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         required=False,
+         help="Path to the output PyTorch model directory.",
+     )
+     parser.add_argument(
+         "--push_to_hub",
+         action="store_true",
+     )
+     parser.add_argument(
+         "--model_name",
+         default="dpt-large",
+         type=str,
+         required=False,
+         help="Name of the model, in case you're pushing to the hub.",
+     )
+
+     args = parser.parse_args()
+     convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
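+     # example invocation, using the default checkpoint URL:
+     #   python convert_dpt_to_pytorch.py --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt --pytorch_dump_folder_path ./dpt-large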
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/feature_extraction_dpt.py ADDED
@@ -0,0 +1,33 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for DPT."""
+
+ import warnings
+
+ from ...utils import logging
+ from .image_processing_dpt import DPTImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class DPTFeatureExtractor(DPTImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+             " use DPTImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
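+         # note: instantiating DPTFeatureExtractor therefore emits a FutureWarning and otherwise behaves
+         # exactly like DPTImageProcessor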
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/image_processing_dpt.py ADDED
@@ -0,0 +1,484 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Image processor class for DPT."""
+
+ import math
+ from typing import Dict, Iterable, List, Optional, Tuple, Union
+
+ import numpy as np
+
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+ from ...image_transforms import pad, resize, to_channel_dimension_format
+ from ...image_utils import (
+     IMAGENET_STANDARD_MEAN,
+     IMAGENET_STANDARD_STD,
+     ChannelDimension,
+     ImageInput,
+     PILImageResampling,
+     get_image_size,
+     infer_channel_dimension_format,
+     is_scaled_image,
+     is_torch_available,
+     is_torch_tensor,
+     make_list_of_images,
+     to_numpy_array,
+     valid_images,
+     validate_kwargs,
+     validate_preprocess_arguments,
+ )
+ from ...utils import TensorType, is_vision_available, logging
+
+
+ if is_torch_available():
+     import torch
+
+ if is_vision_available():
+     import PIL
+
+
+ logger = logging.get_logger(__name__)
+
+
+ def get_resize_output_image_size(
+     input_image: np.ndarray,
+     output_size: Union[int, Iterable[int]],
+     keep_aspect_ratio: bool,
+     multiple: int,
+     input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> Tuple[int, int]:
+     def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
+         x = round(val / multiple) * multiple
+
+         if max_val is not None and x > max_val:
+             x = math.floor(val / multiple) * multiple
+
+         if x < min_val:
+             x = math.ceil(val / multiple) * multiple
+
+         return x
+
+     output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
+
+     input_height, input_width = get_image_size(input_image, input_data_format)
+     output_height, output_width = output_size
+
+     # determine new height and width
+     scale_height = output_height / input_height
+     scale_width = output_width / input_width
+
+     if keep_aspect_ratio:
+         # scale as little as possible
+         if abs(1 - scale_width) < abs(1 - scale_height):
+             # fit width
+             scale_height = scale_width
+         else:
+             # fit height
+             scale_width = scale_height
+
+     new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
+     new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
+
+     return (new_height, new_width)
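+     # worked example (illustrative): a 480x640 input with output_size=(384, 384), keep_aspect_ratio=True and
+     # multiple=32 gives scale_height = 0.8 and scale_width = 0.6; fitting the height sets both scales to 0.8,
+     # so the function returns (384, 512), each side a multiple of 32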
+
+
+ class DPTImageProcessor(BaseImageProcessor):
+     r"""
+     Constructs a DPT image processor.
+
+     Args:
+         do_resize (`bool`, *optional*, defaults to `True`):
+             Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
+         size (`Dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`):
+             Size of the image after resizing. Can be overridden by `size` in `preprocess`.
+         resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+             Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+         keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
+             If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
+             be overridden by `keep_aspect_ratio` in `preprocess`.
+         ensure_multiple_of (`int`, *optional*, defaults to 1):
+             If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be
+             overridden by `ensure_multiple_of` in `preprocess`.
+         do_rescale (`bool`, *optional*, defaults to `True`):
+             Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+             `preprocess`.
+         rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+             Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
+         do_normalize (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+             method.
+         image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+             Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+             channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+         image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+             Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+             number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+         do_pad (`bool`, *optional*, defaults to `False`):
+             Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
+             combination with DPT.
+         size_divisor (`int`, *optional*):
+             If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
+             DINOv2 paper, which uses the model in combination with DPT.
+     """
+
+     model_input_names = ["pixel_values"]
+
+     def __init__(
+         self,
+         do_resize: bool = True,
+         size: Dict[str, int] = None,
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         keep_aspect_ratio: bool = False,
+         ensure_multiple_of: int = 1,
+         do_rescale: bool = True,
+         rescale_factor: Union[int, float] = 1 / 255,
+         do_normalize: bool = True,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         do_pad: bool = False,
+         size_divisor: int = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         size = size if size is not None else {"height": 384, "width": 384}
+         size = get_size_dict(size)
+         self.do_resize = do_resize
+         self.size = size
+         self.keep_aspect_ratio = keep_aspect_ratio
+         self.ensure_multiple_of = ensure_multiple_of
+         self.resample = resample
+         self.do_rescale = do_rescale
+         self.rescale_factor = rescale_factor
+         self.do_normalize = do_normalize
+         self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+         self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+         self.do_pad = do_pad
+         self.size_divisor = size_divisor
+         self._valid_processor_keys = [
+             "images",
+             "do_resize",
+             "size",
+             "keep_aspect_ratio",
+             "ensure_multiple_of",
+             "resample",
+             "do_rescale",
+             "rescale_factor",
+             "do_normalize",
+             "image_mean",
+             "image_std",
+             "do_pad",
+             "size_divisor",
+             "return_tensors",
+             "data_format",
+             "input_data_format",
+         ]
+
+     def resize(
+         self,
+         image: np.ndarray,
+         size: Dict[str, int],
+         keep_aspect_ratio: bool = False,
+         ensure_multiple_of: int = 1,
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         data_format: Optional[Union[str, ChannelDimension]] = None,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> np.ndarray:
+         """
+         Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
+         is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
+         set, the image is resized to a size that is a multiple of this value.
+
+         Args:
+             image (`np.ndarray`):
+                 Image to resize.
+             size (`Dict[str, int]`):
+                 Target size of the output image.
+             keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
+                 If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
+             ensure_multiple_of (`int`, *optional*, defaults to 1):
+                 The image is resized to a size that is a multiple of this value.
+             resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                 Defines the resampling filter to use when resizing the image.
+             data_format (`str` or `ChannelDimension`, *optional*):
+                 The channel dimension format of the image. If not provided, it will be the same as the input image.
+             input_data_format (`str` or `ChannelDimension`, *optional*):
+                 The channel dimension format of the input image. If not provided, it will be inferred.
+         """
+         size = get_size_dict(size)
+         if "height" not in size or "width" not in size:
+             raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
+
+         output_size = get_resize_output_image_size(
+             image,
+             output_size=(size["height"], size["width"]),
+             keep_aspect_ratio=keep_aspect_ratio,
+             multiple=ensure_multiple_of,
+             input_data_format=input_data_format,
+         )
+         return resize(
+             image,
+             size=output_size,
+             resample=resample,
+             data_format=data_format,
+             input_data_format=input_data_format,
+             **kwargs,
+         )
+
+     def pad_image(
+         self,
+         image: np.array,
+         size_divisor: int,
+         data_format: Optional[Union[str, ChannelDimension]] = None,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+     ):
+         """
+         Center pad an image so that its height and width are multiples of `size_divisor`.
+
+         Args:
+             image (`np.ndarray`):
+                 Image to pad.
+             size_divisor (`int`):
+                 The width and height of the image will be padded to a multiple of this number.
+             data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                 The channel dimension format for the output image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - Unset: Use the channel dimension format of the input image.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+         """
+
+         def _get_pad(size, size_divisor):
+             new_size = math.ceil(size / size_divisor) * size_divisor
+             pad_size = new_size - size
+             pad_size_left = pad_size // 2
+             pad_size_right = pad_size - pad_size_left
+             return pad_size_left, pad_size_right
+
+         if input_data_format is None:
+             input_data_format = infer_channel_dimension_format(image)
+
+         height, width = get_image_size(image, input_data_format)
+
+         pad_size_left, pad_size_right = _get_pad(height, size_divisor)
+         pad_size_top, pad_size_bottom = _get_pad(width, size_divisor)
+
+         return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format)
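+         # worked example (illustrative): with size_divisor=32, a height of 380 is padded up to 384, split as
+         # 2 rows before and 2 rows after; the width is padded the same way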
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         do_resize: bool = None,
+         size: Dict[str, int] = None,
+         keep_aspect_ratio: bool = None,
+         ensure_multiple_of: int = None,
+         resample: PILImageResampling = None,
+         do_rescale: bool = None,
+         rescale_factor: float = None,
+         do_normalize: bool = None,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         do_pad: bool = None,
+         size_divisor: int = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         data_format: ChannelDimension = ChannelDimension.FIRST,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> BatchFeature:
+         """
+         Preprocess an image or batch of images.
+
+         Args:
+             images (`ImageInput`):
+                 Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                 passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+             do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                 Whether to resize the image.
+             size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                 Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized to the
+                 largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the
+                 image is resized to a size that is a multiple of this value.
+             keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
+                 Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If
+                 True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.
+             ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
+                 Ensure that the image size is a multiple of this value.
+             resample (`int`, *optional*, defaults to `self.resample`):
+                 Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                 has an effect if `do_resize` is set to `True`.
+             do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                 Whether to rescale the image values to the [0, 1] range.
+             rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                 Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+             do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                 Whether to normalize the image.
+             image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                 Image mean.
+             image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                 Image standard deviation.
+             return_tensors (`str` or `TensorType`, *optional*):
+                 The type of tensors to return. Can be one of:
+                 - Unset: Return a list of `np.ndarray`.
+                 - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                 - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                 - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                 - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+             data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                 The channel dimension format for the output image. Can be one of:
+                 - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+         """
+         do_resize = do_resize if do_resize is not None else self.do_resize
+         size = size if size is not None else self.size
+         size = get_size_dict(size)
+         keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
+         ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
+         resample = resample if resample is not None else self.resample
+         do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+         rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+         do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+         image_mean = image_mean if image_mean is not None else self.image_mean
+         image_std = image_std if image_std is not None else self.image_std
+         do_pad = do_pad if do_pad is not None else self.do_pad
+         size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+
+         images = make_list_of_images(images)
+
+         validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+         validate_preprocess_arguments(
+             do_rescale=do_rescale,
+             rescale_factor=rescale_factor,
+             do_normalize=do_normalize,
+             image_mean=image_mean,
+             image_std=image_std,
+             do_pad=do_pad,
+             size_divisibility=size_divisor,
+             do_resize=do_resize,
+             size=size,
+             resample=resample,
+         )
+         # All transformations expect numpy arrays.
+         images = [to_numpy_array(image) for image in images]
+
+         if is_scaled_image(images[0]) and do_rescale:
+             logger.warning_once(
+                 "It looks like you are trying to rescale already rescaled images. If the input"
+                 " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+             )
+
+         if input_data_format is None:
+             # We assume that all images have the same channel dimension format.
+             input_data_format = infer_channel_dimension_format(images[0])
+
+         if do_resize:
+             images = [
+                 self.resize(
+                     image=image,
+                     size=size,
+                     resample=resample,
+                     keep_aspect_ratio=keep_aspect_ratio,
+                     ensure_multiple_of=ensure_multiple_of,
+                     input_data_format=input_data_format,
+                 )
+                 for image in images
+             ]
+
+         if do_rescale:
+             images = [
+                 self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_normalize:
+             images = [
+                 self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_pad:
+             images = [
+                 self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         images = [
+             to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+         ]
+
+         data = {"pixel_values": images}
+         return BatchFeature(data=data, tensor_type=return_tensors)
+
+     # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->DPT
+     def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
+         """
+         Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
+
+         Args:
+             outputs ([`DPTForSemanticSegmentation`]):
+                 Raw outputs of the model.
+             target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
+                 List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
+                 predictions will not be resized.
+
+         Returns:
+             semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
+             segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
+             specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
+         """
+         # TODO: add support for other frameworks
+         logits = outputs.logits
+
+         # Resize logits and compute semantic segmentation maps
+         if target_sizes is not None:
+             if len(logits) != len(target_sizes):
+                 raise ValueError(
+                     "Make sure that you pass in as many target sizes as the batch dimension of the logits"
+                 )
+
+             if is_torch_tensor(target_sizes):
+                 target_sizes = target_sizes.numpy()
+
+             semantic_segmentation = []
+
+             for idx in range(len(logits)):
+                 resized_logits = torch.nn.functional.interpolate(
+                     logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
+                 )
+                 semantic_map = resized_logits[0].argmax(dim=0)
+                 semantic_segmentation.append(semantic_map)
+         else:
+             semantic_segmentation = logits.argmax(dim=1)
+             semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
+
+         return semantic_segmentation
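+         # illustrative usage (the checkpoint id is an assumption; any DPT segmentation checkpoint works):
+         #   processor = DPTImageProcessor()
+         #   model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
+         #   inputs = processor(image, return_tensors="pt")
+         #   seg_maps = processor.post_process_semantic_segmentation(model(**inputs), target_sizes=[image.size[::-1]])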