applied-ai-018 committed
Commit f36433f · verified · 1 Parent(s): f2261a7

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. ckpts/universal/global_step80/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  8. venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py +23 -0
  11. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/pip/_vendor/distlib/compat.py +1116 -0
  25. venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py +1345 -0
  26. venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py +509 -0
  27. venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py +1300 -0
  28. venv/lib/python3.10/site-packages/pip/_vendor/distlib/manifest.py +393 -0
  29. venv/lib/python3.10/site-packages/pip/_vendor/distlib/markers.py +152 -0
  30. venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py +1058 -0
  31. venv/lib/python3.10/site-packages/pip/_vendor/distlib/resources.py +358 -0
  32. venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py +429 -0
  33. venv/lib/python3.10/site-packages/pip/_vendor/distlib/util.py +1932 -0
  34. venv/lib/python3.10/site-packages/pip/_vendor/distlib/version.py +739 -0
  35. venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py +1053 -0
  36. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__about__.py +26 -0
  37. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__init__.py +25 -0
  38. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/pip/_vendor/packaging/_manylinux.py +301 -0
  50. venv/lib/python3.10/site-packages/pip/_vendor/packaging/_musllinux.py +136 -0
ckpts/universal/global_step80/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:671b787f71d566ded39bde19f1665124313bff7996124ec4a4cbf04dda407bc4
+size 33555533
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bbe9e958d5170da29a6d01037ddea37859ea7e767f0ff4a85a0365dbc395f32
+size 33555612
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d197b3b26747de6cd46ec029a53a5500c334a91c18d1232d55b41708118e83a
+size 33555627
ckpts/universal/global_step80/zero/18.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b33d752f05b1504916ba020057c276af508fa36291a0158cb1a7a67e27b6d34
+size 33555533
ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:650d00e748bc48f06bb921b3004ce6e95e079545306cf04b330b1ce948949b56
+size 33555612
ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ebb90d43e43a0d02e3ae1ea9dba0aaf0c19fd2af02eb087d5aa11e413620b6b
+size 33555627
ckpts/universal/global_step80/zero/5.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05c5ed2b95c1d6e1f6068fcad57f2b271e2b9368b135beea54be1f474caf7353
+size 33555533
venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (289 Bytes)
 
venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-310.pyc ADDED
Binary file (1.57 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py ADDED
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2019 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import logging
+
+__version__ = '0.3.4'
+
+class DistlibException(Exception):
+    pass
+
+try:
+    from logging import NullHandler
+except ImportError: # pragma: no cover
+    class NullHandler(logging.Handler):
+        def handle(self, record): pass
+        def emit(self, record): pass
+        def createLock(self): self.lock = None
+
+logger = logging.getLogger(__name__)
+logger.addHandler(NullHandler())
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.06 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-310.pyc ADDED
Binary file (31.4 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-310.pyc ADDED
Binary file (42.9 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-310.pyc ADDED
Binary file (17.3 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-310.pyc ADDED
Binary file (38.4 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-310.pyc ADDED
Binary file (10.2 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-310.pyc ADDED
Binary file (5.03 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (26.6 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-310.pyc ADDED
Binary file (11 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-310.pyc ADDED
Binary file (11.2 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-310.pyc ADDED
Binary file (51.7 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-310.pyc ADDED
Binary file (20.1 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-310.pyc ADDED
Binary file (27.3 kB)
 
venv/lib/python3.10/site-packages/pip/_vendor/distlib/compat.py ADDED
@@ -0,0 +1,1116 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+
+try:
+    import ssl
+except ImportError: # pragma: no cover
+    ssl = None
+
+if sys.version_info[0] < 3: # pragma: no cover
+    from StringIO import StringIO
+    string_types = basestring,
+    text_type = unicode
+    from types import FileType as file_type
+    import __builtin__ as builtins
+    import ConfigParser as configparser
+    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
+    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
+                        pathname2url, ContentTooShortError, splittype)
+
+    def quote(s):
+        if isinstance(s, unicode):
+            s = s.encode('utf-8')
+        return _quote(s)
+
+    import urllib2
+    from urllib2 import (Request, urlopen, URLError, HTTPError,
+                         HTTPBasicAuthHandler, HTTPPasswordMgr,
+                         HTTPHandler, HTTPRedirectHandler,
+                         build_opener)
+    if ssl:
+        from urllib2 import HTTPSHandler
+    import httplib
+    import xmlrpclib
+    import Queue as queue
+    from HTMLParser import HTMLParser
+    import htmlentitydefs
+    raw_input = raw_input
+    from itertools import ifilter as filter
+    from itertools import ifilterfalse as filterfalse
+
+    # Leaving this around for now, in case it needs resurrecting in some way
+    # _userprog = None
+    # def splituser(host):
+    #     """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    #     global _userprog
+    #     if _userprog is None:
+    #         import re
+    #         _userprog = re.compile('^(.*)@(.*)$')
+
+    #     match = _userprog.match(host)
+    #     if match: return match.group(1, 2)
+    #     return None, host
+
+else: # pragma: no cover
+    from io import StringIO
+    string_types = str,
+    text_type = str
+    from io import TextIOWrapper as file_type
+    import builtins
+    import configparser
+    import shutil
+    from urllib.parse import (urlparse, urlunparse, urljoin, quote,
+                              unquote, urlsplit, urlunsplit, splittype)
+    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
+                                pathname2url,
+                                HTTPBasicAuthHandler, HTTPPasswordMgr,
+                                HTTPHandler, HTTPRedirectHandler,
+                                build_opener)
+    if ssl:
+        from urllib.request import HTTPSHandler
+    from urllib.error import HTTPError, URLError, ContentTooShortError
+    import http.client as httplib
+    import urllib.request as urllib2
+    import xmlrpc.client as xmlrpclib
+    import queue
+    from html.parser import HTMLParser
+    import html.entities as htmlentitydefs
+    raw_input = input
+    from itertools import filterfalse
+    filter = filter
+
+
+try:
+    from ssl import match_hostname, CertificateError
+except ImportError: # pragma: no cover
+    class CertificateError(ValueError):
+        pass
+
+
+    def _dnsname_match(dn, hostname, max_wildcards=1):
+        """Matching according to RFC 6125, section 6.4.3
+
+        http://tools.ietf.org/html/rfc6125#section-6.4.3
+        """
+        pats = []
+        if not dn:
+            return False
+
+        parts = dn.split('.')
+        leftmost, remainder = parts[0], parts[1:]
+
+        wildcards = leftmost.count('*')
+        if wildcards > max_wildcards:
+            # Issue #17980: avoid denials of service by refusing more
+            # than one wildcard per fragment. A survey of established
+            # policy among SSL implementations showed it to be a
+            # reasonable choice.
+            raise CertificateError(
+                "too many wildcards in certificate DNS name: " + repr(dn))
+
+        # speed up common case w/o wildcards
+        if not wildcards:
+            return dn.lower() == hostname.lower()
+
+        # RFC 6125, section 6.4.3, subitem 1.
+        # The client SHOULD NOT attempt to match a presented identifier in which
+        # the wildcard character comprises a label other than the left-most label.
+        if leftmost == '*':
+            # When '*' is a fragment by itself, it matches a non-empty dotless
+            # fragment.
+            pats.append('[^.]+')
+        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+            # RFC 6125, section 6.4.3, subitem 3.
+            # The client SHOULD NOT attempt to match a presented identifier
+            # where the wildcard character is embedded within an A-label or
+            # U-label of an internationalized domain name.
+            pats.append(re.escape(leftmost))
+        else:
+            # Otherwise, '*' matches any dotless string, e.g. www*
+            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+        # add the remaining fragments, ignore any wildcards
+        for frag in remainder:
+            pats.append(re.escape(frag))
+
+        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+        return pat.match(hostname)
+
+
+    def match_hostname(cert, hostname):
+        """Verify that *cert* (in decoded format as returned by
+        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+        rules are followed, but IP addresses are not accepted for *hostname*.
+
+        CertificateError is raised on failure. On success, the function
+        returns nothing.
+        """
+        if not cert:
+            raise ValueError("empty or no certificate, match_hostname needs a "
+                             "SSL socket or SSL context with either "
+                             "CERT_OPTIONAL or CERT_REQUIRED")
+        dnsnames = []
+        san = cert.get('subjectAltName', ())
+        for key, value in san:
+            if key == 'DNS':
+                if _dnsname_match(value, hostname):
+                    return
+                dnsnames.append(value)
+        if not dnsnames:
+            # The subject is only checked when there is no dNSName entry
+            # in subjectAltName
+            for sub in cert.get('subject', ()):
+                for key, value in sub:
+                    # XXX according to RFC 2818, the most specific Common Name
+                    # must be used.
+                    if key == 'commonName':
+                        if _dnsname_match(value, hostname):
+                            return
+                        dnsnames.append(value)
+        if len(dnsnames) > 1:
+            raise CertificateError("hostname %r "
+                "doesn't match either of %s"
+                % (hostname, ', '.join(map(repr, dnsnames))))
+        elif len(dnsnames) == 1:
+            raise CertificateError("hostname %r "
+                "doesn't match %r"
+                % (hostname, dnsnames[0]))
+        else:
+            raise CertificateError("no appropriate commonName or "
+                "subjectAltName fields were found")
+
+
+try:
+    from types import SimpleNamespace as Container
+except ImportError: # pragma: no cover
+    class Container(object):
+        """
+        A generic container for when multiple values need to be returned
+        """
+        def __init__(self, **kwargs):
+            self.__dict__.update(kwargs)
+
+
+try:
+    from shutil import which
+except ImportError: # pragma: no cover
+    # Implementation from Python 3.3
+    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+        """Given a command, mode, and a PATH string, return the path which
+        conforms to the given mode on the PATH, or None if there is no such
+        file.
+
+        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+        of os.environ.get("PATH"), or can be overridden with a custom search
+        path.
+
+        """
+        # Check that a given file can be accessed with the correct mode.
+        # Additionally check that `file` is not a directory, as on Windows
+        # directories pass the os.access check.
+        def _access_check(fn, mode):
+            return (os.path.exists(fn) and os.access(fn, mode)
+                    and not os.path.isdir(fn))
+
+        # If we're given a path with a directory part, look it up directly rather
+        # than referring to PATH directories. This includes checking relative to the
+        # current directory, e.g. ./script
+        if os.path.dirname(cmd):
+            if _access_check(cmd, mode):
+                return cmd
+            return None
+
+        if path is None:
+            path = os.environ.get("PATH", os.defpath)
+        if not path:
+            return None
+        path = path.split(os.pathsep)
+
+        if sys.platform == "win32":
+            # The current directory takes precedence on Windows.
+            if not os.curdir in path:
+                path.insert(0, os.curdir)
+
+            # PATHEXT is necessary to check on Windows.
+            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+            # See if the given file matches any of the expected path extensions.
+            # This will allow us to short circuit when given "python.exe".
+            # If it does match, only test that one, otherwise we have to try
+            # others.
+            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+                files = [cmd]
+            else:
+                files = [cmd + ext for ext in pathext]
+        else:
+            # On other platforms you don't have things like PATHEXT to tell you
+            # what file suffixes are executable, so just pass on cmd as-is.
+            files = [cmd]
+
+        seen = set()
+        for dir in path:
+            normdir = os.path.normcase(dir)
+            if not normdir in seen:
+                seen.add(normdir)
+                for thefile in files:
+                    name = os.path.join(dir, thefile)
+                    if _access_check(name, mode):
+                        return name
+        return None
+
+
+# ZipFile is a context manager in 2.7, but not in 2.6
+
+from zipfile import ZipFile as BaseZipFile
+
+if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
+    ZipFile = BaseZipFile
+else: # pragma: no cover
+    from zipfile import ZipExtFile as BaseZipExtFile
+
+    class ZipExtFile(BaseZipExtFile):
+        def __init__(self, base):
+            self.__dict__.update(base.__dict__)
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, *exc_info):
+            self.close()
+            # return None, so if an exception occurred, it will propagate
+
+    class ZipFile(BaseZipFile):
+        def __enter__(self):
+            return self
+
+        def __exit__(self, *exc_info):
+            self.close()
+            # return None, so if an exception occurred, it will propagate
+
+        def open(self, *args, **kwargs):
+            base = BaseZipFile.open(self, *args, **kwargs)
+            return ZipExtFile(base)
+
+try:
+    from platform import python_implementation
+except ImportError: # pragma: no cover
+    def python_implementation():
+        """Return a string identifying the Python implementation."""
+        if 'PyPy' in sys.version:
+            return 'PyPy'
+        if os.name == 'java':
+            return 'Jython'
+        if sys.version.startswith('IronPython'):
+            return 'IronPython'
+        return 'CPython'
+
+import shutil
+import sysconfig
+
+try:
+    callable = callable
+except NameError: # pragma: no cover
+    from collections.abc import Callable
+
+    def callable(obj):
+        return isinstance(obj, Callable)
+
+
+try:
+    fsencode = os.fsencode
+    fsdecode = os.fsdecode
+except AttributeError: # pragma: no cover
+    # Issue #99: on some systems (e.g. containerised),
+    # sys.getfilesystemencoding() returns None, and we need a real value,
+    # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
+    # sys.getfilesystemencoding(): the return value is "the user’s preference
+    # according to the result of nl_langinfo(CODESET), or None if the
+    # nl_langinfo(CODESET) failed."
+    _fsencoding = sys.getfilesystemencoding() or 'utf-8'
+    if _fsencoding == 'mbcs':
+        _fserrors = 'strict'
+    else:
+        _fserrors = 'surrogateescape'
+
+    def fsencode(filename):
+        if isinstance(filename, bytes):
+            return filename
+        elif isinstance(filename, text_type):
+            return filename.encode(_fsencoding, _fserrors)
+        else:
+            raise TypeError("expect bytes or str, not %s" %
+                            type(filename).__name__)
+
+    def fsdecode(filename):
+        if isinstance(filename, text_type):
+            return filename
+        elif isinstance(filename, bytes):
+            return filename.decode(_fsencoding, _fserrors)
+        else:
+            raise TypeError("expect bytes or str, not %s" %
+                            type(filename).__name__)
+
+try:
+    from tokenize import detect_encoding
+except ImportError: # pragma: no cover
+    from codecs import BOM_UTF8, lookup
+    import re
+
+    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+    def _get_normal_name(orig_enc):
+        """Imitates get_normal_name in tokenizer.c."""
+        # Only care about the first 12 characters.
+        enc = orig_enc[:12].lower().replace("_", "-")
+        if enc == "utf-8" or enc.startswith("utf-8-"):
+            return "utf-8"
+        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+            return "iso-8859-1"
+        return orig_enc
+
+    def detect_encoding(readline):
+        """
+        The detect_encoding() function is used to detect the encoding that should
+        be used to decode a Python source file. It requires one argument, readline,
+        in the same way as the tokenize() generator.
+
+        It will call readline a maximum of twice, and return the encoding used
+        (as a string) and a list of any lines (left as bytes) it has read in.
+
+        It detects the encoding from the presence of a utf-8 bom or an encoding
+        cookie as specified in pep-0263. If both a bom and a cookie are present,
+        but disagree, a SyntaxError will be raised. If the encoding cookie is an
+        invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+        'utf-8-sig' is returned.
+
+        If no encoding is specified, then the default of 'utf-8' will be returned.
+        """
+        try:
+            filename = readline.__self__.name
+        except AttributeError:
+            filename = None
+        bom_found = False
+        encoding = None
+        default = 'utf-8'
+        def read_or_stop():
+            try:
+                return readline()
+            except StopIteration:
+                return b''
+
+        def find_cookie(line):
+            try:
+                # Decode as UTF-8. Either the line is an encoding declaration,
+                # in which case it should be pure ASCII, or it must be UTF-8
+                # per default encoding.
+                line_string = line.decode('utf-8')
+            except UnicodeDecodeError:
+                msg = "invalid or missing encoding declaration"
+                if filename is not None:
+                    msg = '{} for {!r}'.format(msg, filename)
+                raise SyntaxError(msg)
+
+            matches = cookie_re.findall(line_string)
+            if not matches:
+                return None
+            encoding = _get_normal_name(matches[0])
+            try:
+                codec = lookup(encoding)
+            except LookupError:
+                # This behaviour mimics the Python interpreter
+                if filename is None:
+                    msg = "unknown encoding: " + encoding
+                else:
+                    msg = "unknown encoding for {!r}: {}".format(filename,
+                                                                 encoding)
+                raise SyntaxError(msg)
+
+            if bom_found:
+                if codec.name != 'utf-8':
+                    # This behaviour mimics the Python interpreter
+                    if filename is None:
+                        msg = 'encoding problem: utf-8'
+                    else:
+                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
+                    raise SyntaxError(msg)
+                encoding += '-sig'
+            return encoding
+
+        first = read_or_stop()
+        if first.startswith(BOM_UTF8):
+            bom_found = True
+            first = first[3:]
+            default = 'utf-8-sig'
+        if not first:
+            return default, []
+
+        encoding = find_cookie(first)
+        if encoding:
+            return encoding, [first]
+
+        second = read_or_stop()
+        if not second:
+            return default, [first]
+
+        encoding = find_cookie(second)
+        if encoding:
+            return encoding, [first, second]
+
+        return default, [first, second]
+
+# For converting & <-> &amp; etc.
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
+if sys.version_info[:2] < (3, 4):
+    unescape = HTMLParser().unescape
+else:
+    from html import unescape
+
+try:
+    from collections import ChainMap
+except ImportError: # pragma: no cover
+    from collections import MutableMapping
+
+    try:
+        from reprlib import recursive_repr as _recursive_repr
+    except ImportError:
+        def _recursive_repr(fillvalue='...'):
+            '''
+            Decorator to make a repr function return fillvalue for a recursive
+            call
+            '''
+
+            def decorating_function(user_function):
+                repr_running = set()
+
+                def wrapper(self):
+                    key = id(self), get_ident()
+                    if key in repr_running:
+                        return fillvalue
+                    repr_running.add(key)
+                    try:
+                        result = user_function(self)
+                    finally:
+                        repr_running.discard(key)
+                    return result
+
+                # Can't use functools.wraps() here because of bootstrap issues
+                wrapper.__module__ = getattr(user_function, '__module__')
+                wrapper.__doc__ = getattr(user_function, '__doc__')
+                wrapper.__name__ = getattr(user_function, '__name__')
+                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+                return wrapper
+
+            return decorating_function
+
+    class ChainMap(MutableMapping):
+        ''' A ChainMap groups multiple dicts (or other mappings) together
+        to create a single, updateable view.
+
+        The underlying mappings are stored in a list. That list is public and can
+        accessed or updated using the *maps* attribute. There is no other state.
+
+        Lookups search the underlying mappings successively until a key is found.
+        In contrast, writes, updates, and deletions only operate on the first
+        mapping.
+
+        '''
+
+        def __init__(self, *maps):
+            '''Initialize a ChainMap by setting *maps* to the given mappings.
+            If no mappings are provided, a single empty dictionary is used.
+
+            '''
+            self.maps = list(maps) or [{}]          # always at least one map
+
+        def __missing__(self, key):
+            raise KeyError(key)
+
+        def __getitem__(self, key):
+            for mapping in self.maps:
+                try:
+                    return mapping[key]             # can't use 'key in mapping' with defaultdict
+                except KeyError:
+                    pass
+            return self.__missing__(key)            # support subclasses that define __missing__
+
+        def get(self, key, default=None):
+            return self[key] if key in self else default
+
+        def __len__(self):
+            return len(set().union(*self.maps))     # reuses stored hash values if possible
+
+        def __iter__(self):
+            return iter(set().union(*self.maps))
+
+        def __contains__(self, key):
+            return any(key in m for m in self.maps)
+
+        def __bool__(self):
+            return any(self.maps)
+
+        @_recursive_repr()
+        def __repr__(self):
+            return '{0.__class__.__name__}({1})'.format(
+                self, ', '.join(map(repr, self.maps)))
+
+        @classmethod
+        def fromkeys(cls, iterable, *args):
+            'Create a ChainMap with a single dict created from the iterable.'
+            return cls(dict.fromkeys(iterable, *args))
+
+        def copy(self):
+            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+            return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+        __copy__ = copy
+
+        def new_child(self):                        # like Django's Context.push()
+            'New ChainMap with a new dict followed by all previous maps.'
+            return self.__class__({}, *self.maps)
+
+        @property
+        def parents(self):                          # like Django's Context.pop()
+            'New ChainMap from maps[1:].'
+            return self.__class__(*self.maps[1:])
+
+        def __setitem__(self, key, value):
+            self.maps[0][key] = value
+
+        def __delitem__(self, key):
+            try:
+                del self.maps[0][key]
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def popitem(self):
+            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
+            try:
+                return self.maps[0].popitem()
+            except KeyError:
+                raise KeyError('No keys found in the first mapping.')
+
+        def pop(self, key, *args):
+            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+            try:
+                return self.maps[0].pop(key, *args)
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def clear(self):
+            'Clear maps[0], leaving maps[1:] intact.'
+            self.maps[0].clear()
+
+try:
+    from importlib.util import cache_from_source  # Python >= 3.4
+except ImportError: # pragma: no cover
+    def cache_from_source(path, debug_override=None):
+        assert path.endswith('.py')
+        if debug_override is None:
+            debug_override = __debug__
+        if debug_override:
+            suffix = 'c'
+        else:
+            suffix = 'o'
+        return path + suffix
+
+try:
+    from collections import OrderedDict
+except ImportError: # pragma: no cover
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+    try:
+        from thread import get_ident as _get_ident
+    except ImportError:
+        from dummy_thread import get_ident as _get_ident
+
+    try:
+        from _abcoll import KeysView, ValuesView, ItemsView
+    except ImportError:
+        pass
+
+
+    class OrderedDict(dict):
+        'Dictionary that remembers insertion order'
+        # An inherited dict maps keys to values.
+        # The inherited dict provides __getitem__, __len__, __contains__, and get.
+        # The remaining methods are order-aware.
+        # Big-O running times for all methods are the same as for regular dictionaries.
+
+        # The internal self.__map dictionary maps keys to links in a doubly linked list.
+        # The circular doubly linked list starts and ends with a sentinel element.
+        # The sentinel element never gets deleted (this simplifies the algorithm).
+        # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+
+        def __init__(self, *args, **kwds):
+            '''Initialize an ordered dictionary.  Signature is the same as for
+            regular dictionaries, but keyword arguments are not recommended
+            because their insertion order is arbitrary.
+
+            '''
+            if len(args) > 1:
+                raise TypeError('expected at most 1 arguments, got %d' % len(args))
+            try:
+                self.__root
+            except AttributeError:
+                self.__root = root = []                     # sentinel node
+                root[:] = [root, root, None]
+                self.__map = {}
+            self.__update(*args, **kwds)
+
+        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+            'od.__setitem__(i, y) <==> od[i]=y'
+            # Setting a new item creates a new link which goes at the end of the linked
+            # list, and the inherited dictionary is updated with the new key/value pair.
+            if key not in self:
+                root = self.__root
+                last = root[0]
+                last[1] = root[0] = self.__map[key] = [last, root, key]
+            dict_setitem(self, key, value)
+
+        def __delitem__(self, key, dict_delitem=dict.__delitem__):
+            'od.__delitem__(y) <==> del od[y]'
+            # Deleting an existing item uses self.__map to find the link which is
+            # then removed by updating the links in the predecessor and successor nodes.
+            dict_delitem(self, key)
+            link_prev, link_next, key = self.__map.pop(key)
+            link_prev[1] = link_next
+            link_next[0] = link_prev
+
+        def __iter__(self):
+            'od.__iter__() <==> iter(od)'
+            root = self.__root
+            curr = root[1]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[1]
+
+        def __reversed__(self):
+            'od.__reversed__() <==> reversed(od)'
+            root = self.__root
+            curr = root[0]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[0]
+
+        def clear(self):
+            'od.clear() -> None.  Remove all items from od.'
+            try:
+                for node in self.__map.itervalues():
+                    del node[:]
+                root = self.__root
+                root[:] = [root, root, None]
+                self.__map.clear()
+            except AttributeError:
+                pass
+            dict.clear(self)
+
+        def popitem(self, last=True):
+            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+            Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+            '''
+            if not self:
+                raise KeyError('dictionary is empty')
+            root = self.__root
+            if last:
+                link = root[0]
+                link_prev = link[0]
+                link_prev[1] = root
+                root[0] = link_prev
+            else:
+                link = root[1]
+                link_next = link[1]
+                root[1] = link_next
+                link_next[0] = root
+            key = link[2]
+            del self.__map[key]
+            value = dict.pop(self, key)
+            return key, value
+
+        # -- the following methods do not depend on the internal structure --
+
+        def keys(self):
+            'od.keys() -> list of keys in od'
+            return list(self)
+
+        def values(self):
+            'od.values() -> list of values in od'
+            return [self[key] for key in self]
+
+        def items(self):
+            'od.items() -> list of (key, value) pairs in od'
+            return [(key, self[key]) for key in self]
+
+        def iterkeys(self):
+            'od.iterkeys() -> an iterator over the keys in od'
+            return iter(self)
+
+        def itervalues(self):
+            'od.itervalues -> an iterator over the values in od'
+            for k in self:
+                yield self[k]
+
+        def iteritems(self):
+            'od.iteritems -> an iterator over the (key, value) items in od'
+            for k in self:
+                yield (k, self[k])
+
+        def update(*args, **kwds):
+            '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
+
+            If E is a dict instance, does:           for k in E: od[k] = E[k]
+            If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
+            Or if E is an iterable of items, does:   for k, v in E: od[k] = v
+            In either case, this is followed by:     for k, v in F.items(): od[k] = v
+
+            '''
+            if len(args) > 2:
+                raise TypeError('update() takes at most 2 positional '
+                                'arguments (%d given)' % (len(args),))
+            elif not args:
+                raise TypeError('update() takes at least 1 argument (0 given)')
+            self = args[0]
+            # Make progressively weaker assumptions about "other"
+            other = ()
+            if len(args) == 2:
+                other = args[1]
+            if isinstance(other, dict):
+                for key in other:
+                    self[key] = other[key]
+            elif hasattr(other, 'keys'):
+                for key in other.keys():
+                    self[key] = other[key]
+            else:
+                for key, value in other:
+                    self[key] = value
+            for key, value in kwds.items():
+                self[key] = value
+
+        __update = update  # let subclasses override update without breaking __init__
+
+        __marker = object()
+
+        def pop(self, key, default=__marker):
+            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+            If key is not found, d is returned if given, otherwise KeyError is raised.
+
+            '''
+            if key in self:
+                result = self[key]
+                del self[key]
+                return result
+            if default is self.__marker:
+                raise KeyError(key)
+            return default
+
+        def setdefault(self, key, default=None):
+            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+            if key in self:
+                return self[key]
+            self[key] = default
+            return default
+
+        def __repr__(self, _repr_running=None):
+            'od.__repr__() <==> repr(od)'
+            if not _repr_running: _repr_running = {}
+            call_key = id(self), _get_ident()
+            if call_key in _repr_running:
+                return '...'
+            _repr_running[call_key] = 1
+            try:
+                if not self:
+                    return '%s()' % (self.__class__.__name__,)
+                return '%s(%r)' % (self.__class__.__name__, self.items())
+            finally:
+                del _repr_running[call_key]
+
+        def __reduce__(self):
+            'Return state information for pickling'
+            items = [[k, self[k]] for k in self]
+            inst_dict = vars(self).copy()
+            for k in vars(OrderedDict()):
+                inst_dict.pop(k, None)
+            if inst_dict:
+                return (self.__class__, (items,), inst_dict)
+            return self.__class__, (items,)
+
+        def copy(self):
+            'od.copy() -> a shallow copy of od'
+            return self.__class__(self)
+
+        @classmethod
+        def fromkeys(cls, iterable, value=None):
+            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+            and values equal to v (which defaults to None).
+
+            '''
+            d = cls()
+            for key in iterable:
+                d[key] = value
+            return d
+
+        def __eq__(self, other):
+            '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
+            while comparison to a regular mapping is order-insensitive.
+
+            '''
+            if isinstance(other, OrderedDict):
+                return len(self)==len(other) and self.items() == other.items()
+            return dict.__eq__(self, other)
+
+        def __ne__(self, other):
+            return not self == other
+
+        # -- the following methods are only used in Python 2.7 --
+
+        def viewkeys(self):
+            "od.viewkeys() -> a set-like object providing a view on od's keys"
+            return KeysView(self)
+
+        def viewvalues(self):
+            "od.viewvalues() -> an object providing a view on od's values"
+            return ValuesView(self)
+
+        def viewitems(self):
+            "od.viewitems() -> a set-like object providing a view on od's items"
+            return ItemsView(self)
+
+try:
+    from logging.config import BaseConfigurator, valid_ident
+except ImportError: # pragma: no cover
+    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+
+    def valid_ident(s):
+        m = IDENTIFIER.match(s)
+        if not m:
+            raise ValueError('Not a valid Python identifier: %r' % s)
+        return True
+
+
+    # The ConvertingXXX classes are wrappers around standard Python containers,
+    # and they serve to convert any suitable values in the container. The
+    # conversion converts base dicts, lists and tuples to their wrapped
+    # equivalents, whereas strings which match a conversion format are converted
+    # appropriately.
+    #
+    # Each wrapper should have a configurator attribute holding the actual
+    # configurator to use for conversion.
+
+    class ConvertingDict(dict):
+        """A converting dictionary wrapper."""
+
+        def __getitem__(self, key):
+            value = dict.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def get(self, key, default=None):
+            value = dict.get(self, key, default)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, key, default=None):
+            value = dict.pop(self, key, default)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class ConvertingList(list):
+        """A converting list wrapper."""
+        def __getitem__(self, key):
+            value = list.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, idx=-1):
+            value = list.pop(self, idx)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+            return result
+
+    class ConvertingTuple(tuple):
+        """A converting tuple wrapper."""
+        def __getitem__(self, key):
+            value = tuple.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class BaseConfigurator(object):
+        """
+        The configurator base class which defines some useful defaults.
+        """
+
+        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+        DIGIT_PATTERN = re.compile(r'^\d+$')
+
+        value_converters = {
+            'ext' : 'ext_convert',
+            'cfg' : 'cfg_convert',
+        }
+
+        # We might want to use a different one, e.g. importlib
+        importer = staticmethod(__import__)
+
+        def __init__(self, config):
+            self.config = ConvertingDict(config)
+            self.config.configurator = self
+
+        def resolve(self, s):
+            """
+            Resolve strings to objects using standard import and attribute
+            syntax.
+            """
+            name = s.split('.')
+            used = name.pop(0)
+            try:
+                found = self.importer(used)
+                for frag in name:
+                    used += '.' + frag
+                    try:
+                        found = getattr(found, frag)
+                    except AttributeError:
+                        self.importer(used)
+                        found = getattr(found, frag)
+                return found
+            except ImportError:
+                e, tb = sys.exc_info()[1:]
+                v = ValueError('Cannot resolve %r: %s' % (s, e))
+                v.__cause__, v.__traceback__ = e, tb
+                raise v
+
+        def ext_convert(self, value):
+            """Default converter for the ext:// protocol."""
+            return self.resolve(value)
+
+        def cfg_convert(self, value):
+            """Default converter for the cfg:// protocol."""
+            rest = value
+            m = self.WORD_PATTERN.match(rest)
+            if m is None:
+                raise ValueError("Unable to convert %r" % value)
+            else:
+                rest = rest[m.end():]
+                d = self.config[m.groups()[0]]
+                #print d, rest
+                while rest:
+                    m = self.DOT_PATTERN.match(rest)
+                    if m:
+                        d = d[m.groups()[0]]
+                    else:
+                        m = self.INDEX_PATTERN.match(rest)
+                        if m:
+                            idx = m.groups()[0]
+                            if not self.DIGIT_PATTERN.match(idx):
+                                d = d[idx]
+                            else:
+                                try:
+                                    n = int(idx) # try as number first (most likely)
+                                    d = d[n]
+                                except TypeError:
+                                    d = d[idx]
+                    if m:
+                        rest = rest[m.end():]
+                    else:
+                        raise ValueError('Unable to convert '
+                                         '%r at %r' % (value, rest))
+            #rest should be empty
+            return d
+
+        def convert(self, value):
+            """
+            Convert values to an appropriate type. dicts, lists and tuples are
+            replaced by their converting alternatives. Strings are checked to
+            see if they have a conversion format and are converted if they do.
+            """
+            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+                value = ConvertingDict(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingList) and isinstance(value, list):
+                value = ConvertingList(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingTuple) and\
+                     isinstance(value, tuple):
+                value = ConvertingTuple(value)
+                value.configurator = self
+            elif isinstance(value, string_types):
+                m = self.CONVERT_PATTERN.match(value)
+                if m:
+                    d = m.groupdict()
+                    prefix = d['prefix']
+                    converter = self.value_converters.get(prefix, None)
+                    if converter:
+                        suffix = d['suffix']
+                        converter = getattr(self, converter)
+                        value = converter(suffix)
+            return value
+
+        def configure_custom(self, config):
+            """Configure an object with a user-supplied factory."""
+            c = config.pop('()')
+            if not callable(c):
+                c = self.resolve(c)
+            props = config.pop('.', None)
+            # Check for valid identifiers
+            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+            result = c(**kwargs)
+            if props:
+                for name, value in props.items():
+                    setattr(result, name, value)
+            return result
+
+        def as_tuple(self, value):
+            """Utility function which converts lists to tuples."""
+            if isinstance(value, list):
+                value = tuple(value)
+            return value
venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py ADDED
@@ -0,0 +1,1345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2012-2017 The Python Software Foundation.
4
+ # See LICENSE.txt and CONTRIBUTORS.txt.
5
+ #
6
+ """PEP 376 implementation."""
7
+
8
+ from __future__ import unicode_literals
9
+
10
+ import base64
11
+ import codecs
12
+ import contextlib
13
+ import hashlib
14
+ import logging
15
+ import os
16
+ import posixpath
17
+ import sys
18
+ import zipimport
19
+
20
+ from . import DistlibException, resources
21
+ from .compat import StringIO
22
+ from .version import get_scheme, UnsupportedVersionError
23
+ from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
24
+ LEGACY_METADATA_FILENAME)
25
+ from .util import (parse_requirement, cached_property, parse_name_and_version,
26
+ read_exports, write_exports, CSVReader, CSVWriter)
27
+
28
+
29
+ __all__ = ['Distribution', 'BaseInstalledDistribution',
30
+ 'InstalledDistribution', 'EggInfoDistribution',
31
+ 'DistributionPath']
32
+
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+ EXPORTS_FILENAME = 'pydist-exports.json'
37
+ COMMANDS_FILENAME = 'pydist-commands.json'
38
+
39
+ DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
40
+ 'RESOURCES', EXPORTS_FILENAME, 'SHARED')
41
+
42
+ DISTINFO_EXT = '.dist-info'
43
+
44
+
45
+ class _Cache(object):
46
+ """
47
+ A simple cache mapping names and .dist-info paths to distributions
48
+ """
49
+ def __init__(self):
50
+ """
51
+ Initialise an instance. There is normally one for each DistributionPath.
52
+ """
53
+ self.name = {}
54
+ self.path = {}
55
+ self.generated = False
56
+
57
+ def clear(self):
58
+ """
59
+ Clear the cache, setting it to its initial state.
60
+ """
61
+ self.name.clear()
62
+ self.path.clear()
63
+ self.generated = False
64
+
65
+ def add(self, dist):
66
+ """
67
+ Add a distribution to the cache.
68
+ :param dist: The distribution to add.
69
+ """
70
+ if dist.path not in self.path:
71
+ self.path[dist.path] = dist
72
+ self.name.setdefault(dist.key, []).append(dist)
73
+
74
+
75
+ class DistributionPath(object):
76
+ """
77
+ Represents a set of distributions installed on a path (typically sys.path).
78
+ """
79
+ def __init__(self, path=None, include_egg=False):
80
+ """
81
+ Create an instance from a path, optionally including legacy (distutils/
82
+ setuptools/distribute) distributions.
83
+ :param path: The path to use, as a list of directories. If not specified,
84
+ sys.path is used.
85
+ :param include_egg: If True, this instance will look for and return legacy
86
+ distributions as well as those based on PEP 376.
87
+ """
88
+ if path is None:
89
+ path = sys.path
90
+ self.path = path
91
+ self._include_dist = True
92
+ self._include_egg = include_egg
93
+
94
+ self._cache = _Cache()
95
+ self._cache_egg = _Cache()
96
+ self._cache_enabled = True
97
+ self._scheme = get_scheme('default')
98
+
99
+ def _get_cache_enabled(self):
100
+ return self._cache_enabled
101
+
102
+ def _set_cache_enabled(self, value):
103
+ self._cache_enabled = value
104
+
105
+ cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
106
+
107
+ def clear_cache(self):
108
+ """
109
+ Clears the internal cache.
110
+ """
111
+ self._cache.clear()
112
+ self._cache_egg.clear()
113
+
114
+
115
+ def _yield_distributions(self):
116
+ """
117
+ Yield .dist-info and/or .egg(-info) distributions.
118
+ """
119
+ # We need to check if we've seen some resources already, because on
120
+ # some Linux systems (e.g. some Debian/Ubuntu variants) there are
121
+ # symlinks which alias other files in the environment.
122
+ seen = set()
123
+ for path in self.path:
124
+ finder = resources.finder_for_path(path)
125
+ if finder is None:
126
+ continue
127
+ r = finder.find('')
128
+ if not r or not r.is_container:
129
+ continue
130
+ rset = sorted(r.resources)
131
+ for entry in rset:
132
+ r = finder.find(entry)
133
+ if not r or r.path in seen:
134
+ continue
135
+ try:
136
+ if self._include_dist and entry.endswith(DISTINFO_EXT):
137
+ possible_filenames = [METADATA_FILENAME,
138
+ WHEEL_METADATA_FILENAME,
139
+ LEGACY_METADATA_FILENAME]
140
+ for metadata_filename in possible_filenames:
141
+ metadata_path = posixpath.join(entry, metadata_filename)
142
+ pydist = finder.find(metadata_path)
143
+ if pydist:
144
+ break
145
+ else:
146
+ continue
147
+
148
+ with contextlib.closing(pydist.as_stream()) as stream:
149
+ metadata = Metadata(fileobj=stream, scheme='legacy')
150
+ logger.debug('Found %s', r.path)
151
+ seen.add(r.path)
152
+ yield new_dist_class(r.path, metadata=metadata,
153
+ env=self)
154
+ elif self._include_egg and entry.endswith(('.egg-info',
155
+ '.egg')):
156
+ logger.debug('Found %s', r.path)
157
+ seen.add(r.path)
158
+ yield old_dist_class(r.path, self)
159
+ except Exception as e:
160
+ msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'
161
+ logger.warning(msg, r.path, e)
162
+ import warnings
163
+ warnings.warn(msg % (r.path, e), stacklevel=2)
164
+
165
+ def _generate_cache(self):
166
+ """
167
+ Scan the path for distributions and populate the cache with
168
+ those that are found.
169
+ """
170
+ gen_dist = not self._cache.generated
171
+ gen_egg = self._include_egg and not self._cache_egg.generated
172
+ if gen_dist or gen_egg:
173
+ for dist in self._yield_distributions():
174
+ if isinstance(dist, InstalledDistribution):
175
+ self._cache.add(dist)
176
+ else:
177
+ self._cache_egg.add(dist)
178
+
179
+ if gen_dist:
180
+ self._cache.generated = True
181
+ if gen_egg:
182
+ self._cache_egg.generated = True
183
+
184
+ @classmethod
185
+ def distinfo_dirname(cls, name, version):
186
+ """
187
+ The *name* and *version* parameters are converted into their
188
+ filename-escaped form, i.e. any ``'-'`` characters are replaced
189
+ with ``'_'`` other than the one in ``'dist-info'`` and the one
190
+ separating the name from the version number.
191
+
192
+ :parameter name: is converted to a standard distribution name by replacing
193
+ any runs of non-alphanumeric characters with a single
194
+ ``'-'``.
195
+ :type name: string
196
+ :parameter version: is converted to a standard version string. Spaces
197
+ become dots, and all other non-alphanumeric characters
198
+ (except dots) become dashes, with runs of multiple
199
+ dashes condensed to a single dash.
200
+ :type version: string
201
+ :returns: directory name
202
+ :rtype: string"""
203
+ name = name.replace('-', '_')
204
+ return '-'.join([name, version]) + DISTINFO_EXT
205
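
A quick illustration of the escaping rule above (a sketch only; the package name is arbitrary, and the import path follows this vendored module's location):

    from pip._vendor.distlib.database import DistributionPath

    # '-' in the name is escaped to '_'; the remaining '-' separates
    # the name from the version.
    print(DistributionPath.distinfo_dirname('python-dateutil', '2.8.2'))
    # -> python_dateutil-2.8.2.dist-info
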
+
206
+ def get_distributions(self):
207
+ """
208
+ Provides an iterator that looks for distributions and returns
209
+ :class:`InstalledDistribution` or
210
+ :class:`EggInfoDistribution` instances for each one of them.
211
+
212
+ :rtype: iterator of :class:`InstalledDistribution` and
213
+ :class:`EggInfoDistribution` instances
214
+ """
215
+ if not self._cache_enabled:
216
+ for dist in self._yield_distributions():
217
+ yield dist
218
+ else:
219
+ self._generate_cache()
220
+
221
+ for dist in self._cache.path.values():
222
+ yield dist
223
+
224
+ if self._include_egg:
225
+ for dist in self._cache_egg.path.values():
226
+ yield dist
227
+
228
+ def get_distribution(self, name):
229
+ """
230
+ Looks for a named distribution on the path.
231
+
232
+ This function only returns the first result found, as no more than one
233
+ value is expected. If nothing is found, ``None`` is returned.
234
+
235
+ :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
236
+ or ``None``
237
+ """
238
+ result = None
239
+ name = name.lower()
240
+ if not self._cache_enabled:
241
+ for dist in self._yield_distributions():
242
+ if dist.key == name:
243
+ result = dist
244
+ break
245
+ else:
246
+ self._generate_cache()
247
+
248
+ if name in self._cache.name:
249
+ result = self._cache.name[name][0]
250
+ elif self._include_egg and name in self._cache_egg.name:
251
+ result = self._cache_egg.name[name][0]
252
+ return result
253
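
A minimal usage sketch of the two lookups above, assuming this vendored copy of distlib is importable from pip._vendor:

    from pip._vendor.distlib.database import DistributionPath

    dist_path = DistributionPath(include_egg=True)  # also pick up legacy egg dists
    for dist in dist_path.get_distributions():
        print(dist.name, dist.version)

    d = dist_path.get_distribution('pip')  # case-insensitive; None if not found
    if d is not None:
        print(d.name_and_version)
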
+
254
+ def provides_distribution(self, name, version=None):
255
+ """
256
+ Iterates over all distributions to find which distributions provide *name*.
257
+ If a *version* is provided, it will be used to filter the results.
258
+
259
+ This is a generator, which yields every matching distribution rather
260
+ than stopping at the first match; if nothing matches, nothing is yielded.
261
+
262
+ :parameter version: a version specifier that indicates the version
263
+ required, conforming to the format in ``PEP-345``
264
+
265
+ :type name: string
266
+ :type version: string
267
+ """
268
+ matcher = None
269
+ if version is not None:
270
+ try:
271
+ matcher = self._scheme.matcher('%s (%s)' % (name, version))
272
+ except ValueError:
273
+ raise DistlibException('invalid name or version: %r, %r' %
274
+ (name, version))
275
+
276
+ for dist in self.get_distributions():
277
+ # We hit a problem on Travis where enum34 was installed and doesn't
278
+ # have a provides attribute ...
279
+ if not hasattr(dist, 'provides'):
280
+ logger.debug('No "provides": %s', dist)
281
+ else:
282
+ provided = dist.provides
283
+
284
+ for p in provided:
285
+ p_name, p_ver = parse_name_and_version(p)
286
+ if matcher is None:
287
+ if p_name == name:
288
+ yield dist
289
+ break
290
+ else:
291
+ if p_name == name and matcher.match(p_ver):
292
+ yield dist
293
+ break
294
+
295
+ def get_file_path(self, name, relative_path):
296
+ """
297
+ Return the path to a resource file.
298
+ """
299
+ dist = self.get_distribution(name)
300
+ if dist is None:
301
+ raise LookupError('no distribution named %r found' % name)
302
+ return dist.get_resource_path(relative_path)
303
+
304
+ def get_exported_entries(self, category, name=None):
305
+ """
306
+ Return all of the exported entries in a particular category.
307
+
308
+ :param category: The category to search for entries.
309
+ :param name: If specified, only entries with that name are returned.
310
+ """
311
+ for dist in self.get_distributions():
312
+ r = dist.exports
313
+ if category in r:
314
+ d = r[category]
315
+ if name is not None:
316
+ if name in d:
317
+ yield d[name]
318
+ else:
319
+ for v in d.values():
320
+ yield v
321
+
322
+
323
+ class Distribution(object):
324
+ """
325
+ A base class for distributions, whether installed or from indexes.
326
+ Either way, it must have some metadata, so that's all that's needed
327
+ for construction.
328
+ """
329
+
330
+ build_time_dependency = False
331
+ """
332
+ Set to True if it's known to be only a build-time dependency (i.e.
333
+ not needed after installation).
334
+ """
335
+
336
+ requested = False
337
+ """A boolean that indicates whether the ``REQUESTED`` metadata file is
338
+ present (in other words, whether the package was installed by user
339
+ request or it was installed as a dependency)."""
340
+
341
+ def __init__(self, metadata):
342
+ """
343
+ Initialise an instance.
344
+ :param metadata: The instance of :class:`Metadata` describing this
345
+ distribution.
346
+ """
347
+ self.metadata = metadata
348
+ self.name = metadata.name
349
+ self.key = self.name.lower() # for case-insensitive comparisons
350
+ self.version = metadata.version
351
+ self.locator = None
352
+ self.digest = None
353
+ self.extras = None # additional features requested
354
+ self.context = None # environment marker overrides
355
+ self.download_urls = set()
356
+ self.digests = {}
357
+
358
+ @property
359
+ def source_url(self):
360
+ """
361
+ The source archive download URL for this distribution.
362
+ """
363
+ return self.metadata.source_url
364
+
365
+ download_url = source_url # Backward compatibility
366
+
367
+ @property
368
+ def name_and_version(self):
369
+ """
370
+ A utility property which displays the name and version in parentheses.
371
+ """
372
+ return '%s (%s)' % (self.name, self.version)
373
+
374
+ @property
375
+ def provides(self):
376
+ """
377
+ The distribution names and versions provided by this distribution.
379
+ :return: A list of "name (version)" strings.
379
+ """
380
+ plist = self.metadata.provides
381
+ s = '%s (%s)' % (self.name, self.version)
382
+ if s not in plist:
383
+ plist.append(s)
384
+ return plist
385
+
386
+ def _get_requirements(self, req_attr):
387
+ md = self.metadata
388
+ logger.debug('Getting requirements from metadata %r', md.todict())
389
+ reqts = getattr(md, req_attr)
390
+ return set(md.get_requirements(reqts, extras=self.extras,
391
+ env=self.context))
392
+
393
+ @property
394
+ def run_requires(self):
395
+ return self._get_requirements('run_requires')
396
+
397
+ @property
398
+ def meta_requires(self):
399
+ return self._get_requirements('meta_requires')
400
+
401
+ @property
402
+ def build_requires(self):
403
+ return self._get_requirements('build_requires')
404
+
405
+ @property
406
+ def test_requires(self):
407
+ return self._get_requirements('test_requires')
408
+
409
+ @property
410
+ def dev_requires(self):
411
+ return self._get_requirements('dev_requires')
412
+
413
+ def matches_requirement(self, req):
414
+ """
415
+ Say if this instance matches (fulfills) a requirement.
416
+ :param req: The requirement to match.
417
+ :type req: str
418
+ :return: True if it matches, else False.
419
+ """
420
+ # Requirement may contain extras - parse to lose those
421
+ # from what's passed to the matcher
422
+ r = parse_requirement(req)
423
+ scheme = get_scheme(self.metadata.scheme)
424
+ try:
425
+ matcher = scheme.matcher(r.requirement)
426
+ except UnsupportedVersionError:
427
+ # XXX compat-mode if cannot read the version
428
+ logger.warning('could not read version %r - using name only',
429
+ req)
430
+ name = req.split()[0]
431
+ matcher = scheme.matcher(name)
432
+
433
+ name = matcher.key # case-insensitive
434
+
435
+ result = False
436
+ for p in self.provides:
437
+ p_name, p_ver = parse_name_and_version(p)
438
+ if p_name != name:
439
+ continue
440
+ try:
441
+ result = matcher.match(p_ver)
442
+ break
443
+ except UnsupportedVersionError:
444
+ pass
445
+ return result
446
+
447
+ def __repr__(self):
448
+ """
449
+ Return a textual representation of this instance.
450
+ """
451
+ if self.source_url:
452
+ suffix = ' [%s]' % self.source_url
453
+ else:
454
+ suffix = ''
455
+ return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
456
+
457
+ def __eq__(self, other):
458
+ """
459
+ See if this distribution is the same as another.
460
+ :param other: The distribution to compare with. To be equal to one
461
+ another, distributions must have the same type, name,
462
+ version and source_url.
463
+ :return: True if it is the same, else False.
464
+ """
465
+ if type(other) is not type(self):
466
+ result = False
467
+ else:
468
+ result = (self.name == other.name and
469
+ self.version == other.version and
470
+ self.source_url == other.source_url)
471
+ return result
472
+
473
+ def __hash__(self):
474
+ """
475
+ Compute hash in a way which matches the equality test.
476
+ """
477
+ return hash(self.name) + hash(self.version) + hash(self.source_url)
478
+
479
+
480
+ class BaseInstalledDistribution(Distribution):
481
+ """
482
+ This is the base class for installed distributions (whether PEP 376 or
483
+ legacy).
484
+ """
485
+
486
+ hasher = None
487
+
488
+ def __init__(self, metadata, path, env=None):
489
+ """
490
+ Initialise an instance.
491
+ :param metadata: An instance of :class:`Metadata` which describes the
492
+ distribution. This will normally have been initialised
493
+ from a metadata file in the ``path``.
494
+ :param path: The path of the ``.dist-info`` or ``.egg-info``
495
+ directory for the distribution.
496
+ :param env: This is normally the :class:`DistributionPath`
497
+ instance where this distribution was found.
498
+ """
499
+ super(BaseInstalledDistribution, self).__init__(metadata)
500
+ self.path = path
501
+ self.dist_path = env
502
+
503
+ def get_hash(self, data, hasher=None):
504
+ """
505
+ Get the hash of some data, using a particular hash algorithm, if
506
+ specified.
507
+
508
+ :param data: The data to be hashed.
509
+ :type data: bytes
510
+ :param hasher: The name of a hash implementation, supported by hashlib,
511
+ or ``None``. Examples of valid values are ``'sha1'``,
512
+ ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
513
+ ``'sha512'``. If no hasher is specified, the ``hasher``
514
+ attribute of the :class:`InstalledDistribution` instance
515
+ is used. If the hasher is determined to be ``None``, MD5
516
+ is used as the hashing algorithm.
517
+ :returns: The hash of the data. If a hasher was explicitly specified,
518
+ the returned hash will be prefixed with the specified hasher
519
+ followed by '='.
520
+ :rtype: str
521
+ """
522
+ if hasher is None:
523
+ hasher = self.hasher
524
+ if hasher is None:
525
+ hasher = hashlib.md5
526
+ prefix = ''
527
+ else:
528
+ prefix = '%s=' % hasher  # per the docstring: prefix with the algorithm actually used
529
+ hasher = getattr(hashlib, hasher)
530
+ digest = hasher(data).digest()
531
+ digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
532
+ return '%s%s' % (prefix, digest)
533
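
The value built above is the RECORD-style ``algo=digest`` form; equivalently, with the standard library alone (a sketch):

    import base64
    import hashlib

    data = b'example payload'
    raw = hashlib.sha256(data).digest()
    encoded = base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')
    print('sha256=' + encoded)  # 43 urlsafe base64 chars, '=' padding stripped
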
+
534
+
535
+ class InstalledDistribution(BaseInstalledDistribution):
536
+ """
537
+ Created with the *path* of the ``.dist-info`` directory provided to the
538
+ constructor. It reads the metadata contained in ``pydist.json`` when it is
539
+ instantiated, or uses a passed-in Metadata instance (useful when
540
+ dry-run mode is being used).
541
+ """
542
+
543
+ hasher = 'sha256'
544
+
545
+ def __init__(self, path, metadata=None, env=None):
546
+ self.modules = []
547
+ self.finder = finder = resources.finder_for_path(path)
548
+ if finder is None:
549
+ raise ValueError('finder unavailable for %s' % path)
550
+ if env and env._cache_enabled and path in env._cache.path:
551
+ metadata = env._cache.path[path].metadata
552
+ elif metadata is None:
553
+ r = finder.find(METADATA_FILENAME)
554
+ # Temporary - for Wheel 0.23 support
555
+ if r is None:
556
+ r = finder.find(WHEEL_METADATA_FILENAME)
557
+ # Temporary - for legacy support
558
+ if r is None:
559
+ r = finder.find(LEGACY_METADATA_FILENAME)
560
+ if r is None:
561
+ raise ValueError('no %s found in %s' % (METADATA_FILENAME,
562
+ path))
563
+ with contextlib.closing(r.as_stream()) as stream:
564
+ metadata = Metadata(fileobj=stream, scheme='legacy')
565
+
566
+ super(InstalledDistribution, self).__init__(metadata, path, env)
567
+
568
+ if env and env._cache_enabled:
569
+ env._cache.add(self)
570
+
571
+ r = finder.find('REQUESTED')
572
+ self.requested = r is not None
573
+ p = os.path.join(path, 'top_level.txt')
574
+ if os.path.exists(p):
575
+ with open(p, 'rb') as f:
576
+ data = f.read().decode('utf-8')
577
+ self.modules = data.splitlines()
578
+
579
+ def __repr__(self):
580
+ return '<InstalledDistribution %r %s at %r>' % (
581
+ self.name, self.version, self.path)
582
+
583
+ def __str__(self):
584
+ return "%s %s" % (self.name, self.version)
585
+
586
+ def _get_records(self):
587
+ """
588
+ Get the list of installed files for the distribution
589
+ :return: A list of tuples of path, hash and size. Note that hash and
590
+ size might be ``None`` for some entries. The path is exactly
591
+ as stored in the file (which is as in PEP 376).
592
+ """
593
+ results = []
594
+ r = self.get_distinfo_resource('RECORD')
595
+ with contextlib.closing(r.as_stream()) as stream:
596
+ with CSVReader(stream=stream) as record_reader:
597
+ # Base location is parent dir of .dist-info dir
598
+ #base_location = os.path.dirname(self.path)
599
+ #base_location = os.path.abspath(base_location)
600
+ for row in record_reader:
601
+ missing = [None for i in range(len(row), 3)]
602
+ path, checksum, size = row + missing
603
+ #if not os.path.isabs(path):
604
+ # path = path.replace('/', os.sep)
605
+ # path = os.path.join(base_location, path)
606
+ results.append((path, checksum, size))
607
+ return results
608
+
609
+ @cached_property
610
+ def exports(self):
611
+ """
612
+ Return the information exported by this distribution.
613
+ :return: A dictionary of exports, mapping an export category to a dict
614
+ of :class:`ExportEntry` instances describing the individual
615
+ export entries, and keyed by name.
616
+ """
617
+ result = {}
618
+ r = self.get_distinfo_resource(EXPORTS_FILENAME)
619
+ if r:
620
+ result = self.read_exports()
621
+ return result
622
+
623
+ def read_exports(self):
624
+ """
625
+ Read exports data from a file in .ini format.
626
+
627
+ :return: A dictionary of exports, mapping an export category to a list
628
+ of :class:`ExportEntry` instances describing the individual
629
+ export entries.
630
+ """
631
+ result = {}
632
+ r = self.get_distinfo_resource(EXPORTS_FILENAME)
633
+ if r:
634
+ with contextlib.closing(r.as_stream()) as stream:
635
+ result = read_exports(stream)
636
+ return result
637
+
638
+ def write_exports(self, exports):
639
+ """
640
+ Write a dictionary of exports to a file in .ini format.
641
+ :param exports: A dictionary of exports, mapping an export category to
642
+ a list of :class:`ExportEntry` instances describing the
643
+ individual export entries.
644
+ """
645
+ rf = self.get_distinfo_file(EXPORTS_FILENAME)
646
+ with open(rf, 'w') as f:
647
+ write_exports(exports, f)
648
+
649
+ def get_resource_path(self, relative_path):
650
+ """
651
+ NOTE: This API may change in the future.
652
+
653
+ Return the absolute path to a resource file with the given relative
654
+ path.
655
+
656
+ :param relative_path: The path, relative to .dist-info, of the resource
657
+ of interest.
658
+ :return: The absolute path where the resource is to be found.
659
+ """
660
+ r = self.get_distinfo_resource('RESOURCES')
661
+ with contextlib.closing(r.as_stream()) as stream:
662
+ with CSVReader(stream=stream) as resources_reader:
663
+ for relative, destination in resources_reader:
664
+ if relative == relative_path:
665
+ return destination
666
+ raise KeyError('no resource file with relative path %r '
667
+ 'is installed' % relative_path)
668
+
669
+ def list_installed_files(self):
670
+ """
671
+ Iterates over the ``RECORD`` entries and returns a tuple
672
+ ``(path, hash, size)`` for each line.
673
+
674
+ :returns: iterator of (path, hash, size)
675
+ """
676
+ for result in self._get_records():
677
+ yield result
678
+
679
+ def write_installed_files(self, paths, prefix, dry_run=False):
680
+ """
681
+ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
682
+ existing ``RECORD`` file is silently overwritten.
683
+
684
+ prefix is used to determine when to write absolute paths.
685
+ """
686
+ prefix = os.path.join(prefix, '')
687
+ base = os.path.dirname(self.path)
688
+ base_under_prefix = base.startswith(prefix)
689
+ base = os.path.join(base, '')
690
+ record_path = self.get_distinfo_file('RECORD')
691
+ logger.info('creating %s', record_path)
692
+ if dry_run:
693
+ return None
694
+ with CSVWriter(record_path) as writer:
695
+ for path in paths:
696
+ if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
697
+ # do not put size and hash, as in PEP-376
698
+ hash_value = size = ''
699
+ else:
700
+ size = '%d' % os.path.getsize(path)
701
+ with open(path, 'rb') as fp:
702
+ hash_value = self.get_hash(fp.read())
703
+ if path.startswith(base) or (base_under_prefix and
704
+ path.startswith(prefix)):
705
+ path = os.path.relpath(path, base)
706
+ writer.writerow((path, hash_value, size))
707
+
708
+ # add the RECORD file itself
709
+ if record_path.startswith(base):
710
+ record_path = os.path.relpath(record_path, base)
711
+ writer.writerow((record_path, '', ''))
712
+ return record_path
713
+
714
+ def check_installed_files(self):
715
+ """
716
+ Checks that the hashes and sizes of the files in ``RECORD`` are
717
+ matched by the files themselves. Returns a (possibly empty) list of
718
+ mismatches. Each entry in the mismatch list will be a tuple consisting
719
+ of the path, 'exists', 'size' or 'hash' according to what didn't match
720
+ (existence is checked first, then size, then hash), the expected
721
+ value and the actual value.
722
+ """
723
+ mismatches = []
724
+ base = os.path.dirname(self.path)
725
+ record_path = self.get_distinfo_file('RECORD')
726
+ for path, hash_value, size in self.list_installed_files():
727
+ if not os.path.isabs(path):
728
+ path = os.path.join(base, path)
729
+ if path == record_path:
730
+ continue
731
+ if not os.path.exists(path):
732
+ mismatches.append((path, 'exists', True, False))
733
+ elif os.path.isfile(path):
734
+ actual_size = str(os.path.getsize(path))
735
+ if size and actual_size != size:
736
+ mismatches.append((path, 'size', size, actual_size))
737
+ elif hash_value:
738
+ if '=' in hash_value:
739
+ hasher = hash_value.split('=', 1)[0]
740
+ else:
741
+ hasher = None
742
+
743
+ with open(path, 'rb') as f:
744
+ actual_hash = self.get_hash(f.read(), hasher)
745
+ if actual_hash != hash_value:
746
+ mismatches.append((path, 'hash', hash_value, actual_hash))
747
+ return mismatches
748
+
749
+ @cached_property
750
+ def shared_locations(self):
751
+ """
752
+ A dictionary of shared locations whose keys are in the set 'prefix',
753
+ 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
754
+ The corresponding value is the absolute path of that category for
755
+ this distribution, and takes into account any paths selected by the
756
+ user at installation time (e.g. via command-line arguments). In the
757
+ case of the 'namespace' key, this would be a list of absolute paths
758
+ for the roots of namespace packages in this distribution.
759
+
760
+ The first time this property is accessed, the relevant information is
761
+ read from the SHARED file in the .dist-info directory.
762
+ """
763
+ result = {}
764
+ shared_path = os.path.join(self.path, 'SHARED')
765
+ if os.path.isfile(shared_path):
766
+ with codecs.open(shared_path, 'r', encoding='utf-8') as f:
767
+ lines = f.read().splitlines()
768
+ for line in lines:
769
+ key, value = line.split('=', 1)
770
+ if key == 'namespace':
771
+ result.setdefault(key, []).append(value)
772
+ else:
773
+ result[key] = value
774
+ return result
775
+
776
+ def write_shared_locations(self, paths, dry_run=False):
777
+ """
778
+ Write shared location information to the SHARED file in .dist-info.
779
+ :param paths: A dictionary as described in the documentation for
780
+ :meth:`shared_locations`.
781
+ :param dry_run: If True, the action is logged but no file is actually
782
+ written.
783
+ :return: The path of the file written to.
784
+ """
785
+ shared_path = os.path.join(self.path, 'SHARED')
786
+ logger.info('creating %s', shared_path)
787
+ if dry_run:
788
+ return None
789
+ lines = []
790
+ for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
791
+ path = paths[key]
792
+ if os.path.isdir(path):
793
+ lines.append('%s=%s' % (key, path))
794
+ for ns in paths.get('namespace', ()):
795
+ lines.append('namespace=%s' % ns)
796
+
797
+ with codecs.open(shared_path, 'w', encoding='utf-8') as f:
798
+ f.write('\n'.join(lines))
799
+ return shared_path
800
+
801
+ def get_distinfo_resource(self, path):
802
+ if path not in DIST_FILES:
803
+ raise DistlibException('invalid path for a dist-info file: '
804
+ '%r at %r' % (path, self.path))
805
+ finder = resources.finder_for_path(self.path)
806
+ if finder is None:
807
+ raise DistlibException('Unable to get a finder for %s' % self.path)
808
+ return finder.find(path)
809
+
810
+ def get_distinfo_file(self, path):
811
+ """
812
+ Returns a path located under the ``.dist-info`` directory. Returns a
813
+ string representing the path.
814
+
815
+ :parameter path: a ``'/'``-separated path relative to the
816
+ ``.dist-info`` directory or an absolute path;
817
+ If *path* is an absolute path and doesn't start
818
+ with the ``.dist-info`` directory path,
819
+ a :class:`DistlibException` is raised
820
+ :type path: str
821
+ :rtype: str
822
+ """
823
+ # Check if it is an absolute path # XXX use relpath, add tests
824
+ if path.find(os.sep) >= 0:
825
+ # it's an absolute path?
826
+ distinfo_dirname, path = path.split(os.sep)[-2:]
827
+ if distinfo_dirname != self.path.split(os.sep)[-1]:
828
+ raise DistlibException(
829
+ 'dist-info file %r does not belong to the %r %s '
830
+ 'distribution' % (path, self.name, self.version))
831
+
832
+ # The file must be relative
833
+ if path not in DIST_FILES:
834
+ raise DistlibException('invalid path for a dist-info file: '
835
+ '%r at %r' % (path, self.path))
836
+
837
+ return os.path.join(self.path, path)
838
+
839
+ def list_distinfo_files(self):
840
+ """
841
+ Iterates over the ``RECORD`` entries and returns paths for each line if
842
+ the path is pointing to a file located in the ``.dist-info`` directory
843
+ or one of its subdirectories.
844
+
845
+ :returns: iterator of paths
846
+ """
847
+ base = os.path.dirname(self.path)
848
+ for path, checksum, size in self._get_records():
849
+ # XXX add separator or use real relpath algo
850
+ if not os.path.isabs(path):
851
+ path = os.path.join(base, path)
852
+ if path.startswith(self.path):
853
+ yield path
854
+
855
+ def __eq__(self, other):
856
+ return (isinstance(other, InstalledDistribution) and
857
+ self.path == other.path)
858
+
859
+ # See http://docs.python.org/reference/datamodel#object.__hash__
860
+ __hash__ = object.__hash__
861
+
862
+
863
+ class EggInfoDistribution(BaseInstalledDistribution):
864
+ """Created with the *path* of the ``.egg-info`` directory or file provided
865
+ to the constructor. It reads the metadata contained in the file itself, or
866
+ if the given path happens to be a directory, the metadata is read from the
867
+ file ``PKG-INFO`` under that directory."""
868
+
869
+ requested = True # as we have no way of knowing, assume it was
870
+ shared_locations = {}
871
+
872
+ def __init__(self, path, env=None):
873
+ def set_name_and_version(s, n, v):
874
+ s.name = n
875
+ s.key = n.lower() # for case-insensitive comparisons
876
+ s.version = v
877
+
878
+ self.path = path
879
+ self.dist_path = env
880
+ if env and env._cache_enabled and path in env._cache_egg.path:
881
+ metadata = env._cache_egg.path[path].metadata
882
+ set_name_and_version(self, metadata.name, metadata.version)
883
+ else:
884
+ metadata = self._get_metadata(path)
885
+
886
+ # Need to be set before caching
887
+ set_name_and_version(self, metadata.name, metadata.version)
888
+
889
+ if env and env._cache_enabled:
890
+ env._cache_egg.add(self)
891
+ super(EggInfoDistribution, self).__init__(metadata, path, env)
892
+
893
+ def _get_metadata(self, path):
894
+ requires = None
895
+
896
+ def parse_requires_data(data):
897
+ """Create a list of dependencies from a requires.txt file.
898
+
899
+ *data*: the contents of a setuptools-produced requires.txt file.
900
+ """
901
+ reqs = []
902
+ lines = data.splitlines()
903
+ for line in lines:
904
+ line = line.strip()
905
+ if line.startswith('['):
906
+ logger.warning('Unexpected line: quitting requirement scan: %r',
907
+ line)
908
+ break
909
+ r = parse_requirement(line)
910
+ if not r:
911
+ logger.warning('Not recognised as a requirement: %r', line)
912
+ continue
913
+ if r.extras:
914
+ logger.warning('extra requirements in requires.txt are '
915
+ 'not supported')
916
+ if not r.constraints:
917
+ reqs.append(r.name)
918
+ else:
919
+ cons = ', '.join('%s%s' % c for c in r.constraints)
920
+ reqs.append('%s (%s)' % (r.name, cons))
921
+ return reqs
922
+
923
+ def parse_requires_path(req_path):
924
+ """Create a list of dependencies from a requires.txt file.
925
+
926
+ *req_path*: the path to a setuptools-produced requires.txt file.
927
+ """
928
+
929
+ reqs = []
930
+ try:
931
+ with codecs.open(req_path, 'r', 'utf-8') as fp:
932
+ reqs = parse_requires_data(fp.read())
933
+ except IOError:
934
+ pass
935
+ return reqs
936
+
937
+ tl_path = tl_data = None
938
+ if path.endswith('.egg'):
939
+ if os.path.isdir(path):
940
+ p = os.path.join(path, 'EGG-INFO')
941
+ meta_path = os.path.join(p, 'PKG-INFO')
942
+ metadata = Metadata(path=meta_path, scheme='legacy')
943
+ req_path = os.path.join(p, 'requires.txt')
944
+ tl_path = os.path.join(p, 'top_level.txt')
945
+ requires = parse_requires_path(req_path)
946
+ else:
947
+ # FIXME handle the case where zipfile is not available
948
+ zipf = zipimport.zipimporter(path)
949
+ fileobj = StringIO(
950
+ zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
951
+ metadata = Metadata(fileobj=fileobj, scheme='legacy')
952
+ try:
953
+ data = zipf.get_data('EGG-INFO/requires.txt')
954
+ tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
955
+ requires = parse_requires_data(data.decode('utf-8'))
956
+ except IOError:
957
+ requires = None
958
+ elif path.endswith('.egg-info'):
959
+ if os.path.isdir(path):
960
+ req_path = os.path.join(path, 'requires.txt')
961
+ requires = parse_requires_path(req_path)
962
+ path = os.path.join(path, 'PKG-INFO')
963
+ tl_path = os.path.join(path, 'top_level.txt')
964
+ metadata = Metadata(path=path, scheme='legacy')
965
+ else:
966
+ raise DistlibException('path must end with .egg-info or .egg, '
967
+ 'got %r' % path)
968
+
969
+ if requires:
970
+ metadata.add_requirements(requires)
971
+ # look for top-level modules in top_level.txt, if present
972
+ if tl_data is None:
973
+ if tl_path is not None and os.path.exists(tl_path):
974
+ with open(tl_path, 'rb') as f:
975
+ tl_data = f.read().decode('utf-8')
976
+ if not tl_data:
977
+ tl_data = []
978
+ else:
979
+ tl_data = tl_data.splitlines()
980
+ self.modules = tl_data
981
+ return metadata
982
+
983
+ def __repr__(self):
984
+ return '<EggInfoDistribution %r %s at %r>' % (
985
+ self.name, self.version, self.path)
986
+
987
+ def __str__(self):
988
+ return "%s %s" % (self.name, self.version)
989
+
990
+ def check_installed_files(self):
991
+ """
992
+ Checks that the hashes and sizes of the files in ``RECORD`` are
993
+ matched by the files themselves. Returns a (possibly empty) list of
994
+ mismatches. Each entry in the mismatch list will be a tuple consisting
995
+ of the path, 'exists', 'size' or 'hash' according to what didn't match
996
+ (existence is checked first, then size, then hash), the expected
997
+ value and the actual value.
998
+ """
999
+ mismatches = []
1000
+ record_path = os.path.join(self.path, 'installed-files.txt')
1001
+ if os.path.exists(record_path):
1002
+ for path, _, _ in self.list_installed_files():
1003
+ if path == record_path:
1004
+ continue
1005
+ if not os.path.exists(path):
1006
+ mismatches.append((path, 'exists', True, False))
1007
+ return mismatches
1008
+
1009
+ def list_installed_files(self):
1010
+ """
1011
+ Iterates over the ``installed-files.txt`` entries and returns a tuple
1012
+ ``(path, hash, size)`` for each line.
1013
+
1014
+ :returns: a list of (path, hash, size)
1015
+ """
1016
+
1017
+ def _md5(path):
1018
+ f = open(path, 'rb')
1019
+ try:
1020
+ content = f.read()
1021
+ finally:
1022
+ f.close()
1023
+ return hashlib.md5(content).hexdigest()
1024
+
1025
+ def _size(path):
1026
+ return os.stat(path).st_size
1027
+
1028
+ record_path = os.path.join(self.path, 'installed-files.txt')
1029
+ result = []
1030
+ if os.path.exists(record_path):
1031
+ with codecs.open(record_path, 'r', encoding='utf-8') as f:
1032
+ for line in f:
1033
+ line = line.strip()
1034
+ p = os.path.normpath(os.path.join(self.path, line))
1035
+ # "./" is present as a marker between installed files
1036
+ # and installation metadata files
1037
+ if not os.path.exists(p):
1038
+ logger.warning('Non-existent file: %s', p)
1039
+ if p.endswith(('.pyc', '.pyo')):
1040
+ continue
1041
+ # otherwise fall through and fail
1042
+ if not os.path.isdir(p):
1043
+ result.append((p, _md5(p), _size(p)))
1044
+ result.append((record_path, None, None))
1045
+ return result
1046
+
1047
+ def list_distinfo_files(self, absolute=False):
1048
+ """
1049
+ Iterates over the ``installed-files.txt`` entries and returns paths for
1050
+ each line if the path is pointing to a file located in the
1051
+ ``.egg-info`` directory or one of its subdirectories.
1052
+
1053
+ :parameter absolute: If *absolute* is ``True``, each returned path is
1054
+ transformed into a local absolute path. Otherwise the
1055
+ raw value from ``installed-files.txt`` is returned.
1056
+ :type absolute: boolean
1057
+ :returns: iterator of paths
1058
+ """
1059
+ record_path = os.path.join(self.path, 'installed-files.txt')
1060
+ if os.path.exists(record_path):
1061
+ skip = True
1062
+ with codecs.open(record_path, 'r', encoding='utf-8') as f:
1063
+ for line in f:
1064
+ line = line.strip()
1065
+ if line == './':
1066
+ skip = False
1067
+ continue
1068
+ if not skip:
1069
+ p = os.path.normpath(os.path.join(self.path, line))
1070
+ if p.startswith(self.path):
1071
+ if absolute:
1072
+ yield p
1073
+ else:
1074
+ yield line
1075
+
1076
+ def __eq__(self, other):
1077
+ return (isinstance(other, EggInfoDistribution) and
1078
+ self.path == other.path)
1079
+
1080
+ # See http://docs.python.org/reference/datamodel#object.__hash__
1081
+ __hash__ = object.__hash__
1082
+
1083
+ new_dist_class = InstalledDistribution
1084
+ old_dist_class = EggInfoDistribution
1085
+
1086
+
1087
+ class DependencyGraph(object):
1088
+ """
1089
+ Represents a dependency graph between distributions.
1090
+
1091
+ The dependency relationships are stored in an ``adjacency_list`` that maps
1092
+ distributions to a list of ``(other, label)`` tuples where ``other``
1093
+ is a distribution and the edge is labeled with ``label`` (i.e. the version
1094
+ specifier, if such was provided). Also, for more efficient traversal, for
1095
+ every distribution ``x``, a list of predecessors is kept in
1096
+ ``reverse_list[x]``. An edge from distribution ``a`` to
1097
+ distribution ``b`` means that ``a`` depends on ``b``. If any missing
1098
+ dependencies are found, they are stored in ``missing``, which is a
1099
+ dictionary that maps distributions to a list of requirements that were not
1100
+ provided by any other distributions.
1101
+ """
1102
+
1103
+ def __init__(self):
1104
+ self.adjacency_list = {}
1105
+ self.reverse_list = {}
1106
+ self.missing = {}
1107
+
1108
+ def add_distribution(self, distribution):
1109
+ """Add the *distribution* to the graph.
1110
+
1111
+ :type distribution: :class:`distutils2.database.InstalledDistribution`
1112
+ or :class:`distutils2.database.EggInfoDistribution`
1113
+ """
1114
+ self.adjacency_list[distribution] = []
1115
+ self.reverse_list[distribution] = []
1116
+ #self.missing[distribution] = []
1117
+
1118
+ def add_edge(self, x, y, label=None):
1119
+ """Add an edge from distribution *x* to distribution *y* with the given
1120
+ *label*.
1121
+
1122
+ :type x: :class:`distutils2.database.InstalledDistribution` or
1123
+ :class:`distutils2.database.EggInfoDistribution`
1124
+ :type y: :class:`distutils2.database.InstalledDistribution` or
1125
+ :class:`distutils2.database.EggInfoDistribution`
1126
+ :type label: ``str`` or ``None``
1127
+ """
1128
+ self.adjacency_list[x].append((y, label))
1129
+ # multiple edges are allowed, so be careful
1130
+ if x not in self.reverse_list[y]:
1131
+ self.reverse_list[y].append(x)
1132
+
1133
+ def add_missing(self, distribution, requirement):
1134
+ """
1135
+ Add a missing *requirement* for the given *distribution*.
1136
+
1137
+ :type distribution: :class:`distutils2.database.InstalledDistribution`
1138
+ or :class:`distutils2.database.EggInfoDistribution`
1139
+ :type requirement: ``str``
1140
+ """
1141
+ logger.debug('%s missing %r', distribution, requirement)
1142
+ self.missing.setdefault(distribution, []).append(requirement)
1143
+
1144
+ def _repr_dist(self, dist):
1145
+ return '%s %s' % (dist.name, dist.version)
1146
+
1147
+ def repr_node(self, dist, level=1):
1148
+ """Prints only a subgraph"""
1149
+ output = [self._repr_dist(dist)]
1150
+ for other, label in self.adjacency_list[dist]:
1151
+ dist = self._repr_dist(other)
1152
+ if label is not None:
1153
+ dist = '%s [%s]' % (dist, label)
1154
+ output.append(' ' * level + str(dist))
1155
+ suboutput = self.repr_node(other, level + 1)
1156
+ subs = suboutput.split('\n')
1157
+ output.extend(subs[1:])
1158
+ return '\n'.join(output)
1159
+
1160
+ def to_dot(self, f, skip_disconnected=True):
1161
+ """Writes a DOT output for the graph to the provided file *f*.
1162
+
1163
+ If *skip_disconnected* is set to ``True``, then all distributions
1164
+ that are not dependent on any other distribution are skipped.
1165
+
1166
+ :type f: has to support ``file``-like operations
1167
+ :type skip_disconnected: ``bool``
1168
+ """
1169
+ disconnected = []
1170
+
1171
+ f.write("digraph dependencies {\n")
1172
+ for dist, adjs in self.adjacency_list.items():
1173
+ if len(adjs) == 0 and not skip_disconnected:
1174
+ disconnected.append(dist)
1175
+ for other, label in adjs:
1176
+ if label is not None:
1177
+ f.write('"%s" -> "%s" [label="%s"]\n' %
1178
+ (dist.name, other.name, label))
1179
+ else:
1180
+ f.write('"%s" -> "%s"\n' % (dist.name, other.name))
1181
+ if not skip_disconnected and len(disconnected) > 0:
1182
+ f.write('subgraph disconnected {\n')
1183
+ f.write('label = "Disconnected"\n')
1184
+ f.write('bgcolor = red\n')
1185
+
1186
+ for dist in disconnected:
1187
+ f.write('"%s"' % dist.name)
1188
+ f.write('\n')
1189
+ f.write('}\n')
1190
+ f.write('}\n')
1191
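
A sketch that dumps the installed-distribution graph to a DOT file; rendering the file is assumed to be done separately with Graphviz:

    from pip._vendor.distlib.database import DistributionPath, make_graph

    graph = make_graph(list(DistributionPath().get_distributions()))
    with open('deps.dot', 'w') as f:
        graph.to_dot(f, skip_disconnected=True)
    # then, outside Python: dot -Tpng deps.dot -o deps.png
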
+
1192
+ def topological_sort(self):
1193
+ """
1194
+ Perform a topological sort of the graph.
1195
+ :return: A tuple, the first element of which is a topologically sorted
1196
+ list of distributions, and the second element of which is a
1197
+ list of distributions that cannot be sorted because they have
1198
+ circular dependencies and so form a cycle.
1199
+ """
1200
+ result = []
1201
+ # Copy the adjacency list, copying each edge list as well
1202
+ alist = {}
1203
+ for k, v in self.adjacency_list.items():
1204
+ alist[k] = v[:]
1205
+ while True:
1206
+ # See what we can remove in this run
1207
+ to_remove = []
1208
+ for k, v in list(alist.items()):
1209
+ if not v:
1210
+ to_remove.append(k)
1211
+ del alist[k]
1212
+ if not to_remove:
1213
+ # What's left in alist (if anything) is a cycle.
1214
+ break
1215
+ # Remove from the adjacency list of others
1216
+ for k, v in alist.items():
1217
+ alist[k] = [(d, r) for d, r in v if d not in to_remove]
1218
+ logger.debug('Moving to result: %s',
1219
+ ['%s (%s)' % (d.name, d.version) for d in to_remove])
1220
+ result.extend(to_remove)
1221
+ return result, list(alist.keys())
1222
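
Because a node is emitted only once it has no outstanding edges, the sorted list comes out dependency-first; a usage sketch:

    from pip._vendor.distlib.database import DistributionPath, make_graph

    graph = make_graph(list(DistributionPath().get_distributions()))
    ordered, cyclic = graph.topological_sort()
    for dist in ordered:
        print(dist.name_and_version)  # each dist precedes its dependents
    if cyclic:
        print('circular:', [d.name for d in cyclic])
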
+
1223
+ def __repr__(self):
1224
+ """Representation of the graph"""
1225
+ output = []
1226
+ for dist, adjs in self.adjacency_list.items():
1227
+ output.append(self.repr_node(dist))
1228
+ return '\n'.join(output)
1229
+
1230
+
1231
+ def make_graph(dists, scheme='default'):
1232
+ """Makes a dependency graph from the given distributions.
1233
+
1234
+ :parameter dists: a list of distributions
1235
+ :type dists: list of :class:`distutils2.database.InstalledDistribution` and
1236
+ :class:`distutils2.database.EggInfoDistribution` instances
1237
+ :rtype: a :class:`DependencyGraph` instance
1238
+ """
1239
+ scheme = get_scheme(scheme)
1240
+ graph = DependencyGraph()
1241
+ provided = {} # maps names to lists of (version, dist) tuples
1242
+
1243
+ # first, build the graph and find out what's provided
1244
+ for dist in dists:
1245
+ graph.add_distribution(dist)
1246
+
1247
+ for p in dist.provides:
1248
+ name, version = parse_name_and_version(p)
1249
+ logger.debug('Add to provided: %s, %s, %s', name, version, dist)
1250
+ provided.setdefault(name, []).append((version, dist))
1251
+
1252
+ # now make the edges
1253
+ for dist in dists:
1254
+ requires = (dist.run_requires | dist.meta_requires |
1255
+ dist.build_requires | dist.dev_requires)
1256
+ for req in requires:
1257
+ try:
1258
+ matcher = scheme.matcher(req)
1259
+ except UnsupportedVersionError:
1260
+ # XXX compat-mode if cannot read the version
1261
+ logger.warning('could not read version %r - using name only',
1262
+ req)
1263
+ name = req.split()[0]
1264
+ matcher = scheme.matcher(name)
1265
+
1266
+ name = matcher.key # case-insensitive
1267
+
1268
+ matched = False
1269
+ if name in provided:
1270
+ for version, provider in provided[name]:
1271
+ try:
1272
+ match = matcher.match(version)
1273
+ except UnsupportedVersionError:
1274
+ match = False
1275
+
1276
+ if match:
1277
+ graph.add_edge(dist, provider, req)
1278
+ matched = True
1279
+ break
1280
+ if not matched:
1281
+ graph.add_missing(dist, req)
1282
+ return graph
1283
+
1284
+
1285
+ def get_dependent_dists(dists, dist):
1286
+ """Recursively generate a list of distributions from *dists* that are
1287
+ dependent on *dist*.
1288
+
1289
+ :param dists: a list of distributions
1290
+ :param dist: a distribution, a member of *dists*, whose dependents we want
1291
+ """
1292
+ if dist not in dists:
1293
+ raise DistlibException('given distribution %r is not a member '
1294
+ 'of the list' % dist.name)
1295
+ graph = make_graph(dists)
1296
+
1297
+ dep = [dist] # dependent distributions
1298
+ todo = graph.reverse_list[dist] # list of nodes we should inspect
1299
+
1300
+ while todo:
1301
+ d = todo.pop()
1302
+ dep.append(d)
1303
+ for succ in graph.reverse_list[d]:
1304
+ if succ not in dep:
1305
+ todo.append(succ)
1306
+
1307
+ dep.pop(0) # remove dist from dep, was there to prevent infinite loops
1308
+ return dep
1309
+
1310
+
1311
+ def get_required_dists(dists, dist):
1312
+ """Recursively generate a list of distributions from *dists* that are
1313
+ required by *dist*.
1314
+
1315
+ :param dists: a list of distributions
1316
+ :param dist: a distribution, a member of *dists*, whose requirements we want
1317
+ """
1318
+ if dist not in dists:
1319
+ raise DistlibException('given distribution %r is not a member '
1320
+ 'of the list' % dist.name)
1321
+ graph = make_graph(dists)
1322
+
1323
+ req = [] # required distributions
1324
+ todo = graph.adjacency_list[dist] # list of nodes we should inspect
1325
+
1326
+ while todo:
1327
+ d = todo.pop()[0]
1328
+ req.append(d)
1329
+ for pred in graph.adjacency_list[d]:
1330
+ if pred not in req:
1331
+ todo.append(pred)
1332
+
1333
+ return req
1334
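
A usage sketch; ``'requests'`` stands in for any distribution known to be installed:

    from pip._vendor.distlib.database import DistributionPath, get_required_dists

    dists = list(DistributionPath().get_distributions())
    target = next(d for d in dists if d.key == 'requests')  # hypothetical member
    for dep in get_required_dists(dists, target):
        print(dep.name_and_version)  # everything the target pulls in, transitively
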
+
1335
+
1336
+ def make_dist(name, version, **kwargs):
1337
+ """
1338
+ A convenience method for making a dist given just a name and version.
1339
+ """
1340
+ summary = kwargs.pop('summary', 'Placeholder for summary')
1341
+ md = Metadata(**kwargs)
1342
+ md.name = name
1343
+ md.version = version
1344
+ md.summary = summary or 'Placeholder for summary'
1345
+ return Distribution(md)
venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py ADDED
@@ -0,0 +1,509 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2013 Vinay Sajip.
4
+ # Licensed to the Python Software Foundation under a contributor agreement.
5
+ # See LICENSE.txt and CONTRIBUTORS.txt.
6
+ #
7
+ import hashlib
8
+ import logging
9
+ import os
10
+ import shutil
11
+ import subprocess
12
+ import tempfile
13
+ try:
14
+ from threading import Thread
15
+ except ImportError:
16
+ from dummy_threading import Thread
17
+
18
+ from . import DistlibException
19
+ from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
20
+ urlparse, build_opener, string_types)
21
+ from .util import zip_dir, ServerProxy
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+ DEFAULT_INDEX = 'https://pypi.org/pypi'
26
+ DEFAULT_REALM = 'pypi'
27
+
28
+ class PackageIndex(object):
29
+ """
30
+ This class represents a package index compatible with PyPI, the Python
31
+ Package Index.
32
+ """
33
+
34
+ boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
35
+
36
+ def __init__(self, url=None):
37
+ """
38
+ Initialise an instance.
39
+
40
+ :param url: The URL of the index. If not specified, the URL for PyPI is
41
+ used.
42
+ """
43
+ self.url = url or DEFAULT_INDEX
44
+ self.read_configuration()
45
+ scheme, netloc, path, params, query, frag = urlparse(self.url)
46
+ if params or query or frag or scheme not in ('http', 'https'):
47
+ raise DistlibException('invalid repository: %s' % self.url)
48
+ self.password_handler = None
49
+ self.ssl_verifier = None
50
+ self.gpg = None
51
+ self.gpg_home = None
52
+ with open(os.devnull, 'w') as sink:
53
+ # Use gpg by default rather than gpg2, as gpg2 insists on
54
+ # prompting for passwords
55
+ for s in ('gpg', 'gpg2'):
56
+ try:
57
+ rc = subprocess.check_call([s, '--version'], stdout=sink,
58
+ stderr=sink)
59
+ if rc == 0:
60
+ self.gpg = s
61
+ break
62
+ except OSError:
63
+ pass
64
+
65
+ def _get_pypirc_command(self):
66
+ """
67
+ Get the distutils command for interacting with PyPI configurations.
68
+ :return: the command.
69
+ """
70
+ from .util import _get_pypirc_command as cmd
71
+ return cmd()
72
+
73
+ def read_configuration(self):
74
+ """
75
+ Read the PyPI access configuration as supported by distutils. This populates
76
+ ``username``, ``password``, ``realm`` and ``url`` attributes from the
77
+ configuration.
78
+ """
79
+ from .util import _load_pypirc
80
+ cfg = _load_pypirc(self)
81
+ self.username = cfg.get('username')
82
+ self.password = cfg.get('password')
83
+ self.realm = cfg.get('realm', 'pypi')
84
+ self.url = cfg.get('repository', self.url)
85
+
86
+ def save_configuration(self):
87
+ """
88
+ Save the PyPI access configuration. You must have set ``username`` and
89
+ ``password`` attributes before calling this method.
90
+ """
91
+ self.check_credentials()
92
+ from .util import _store_pypirc
93
+ _store_pypirc(self)
94
+
95
+ def check_credentials(self):
96
+ """
97
+ Check that ``username`` and ``password`` have been set, and raise an
98
+ exception if not.
99
+ """
100
+ if self.username is None or self.password is None:
101
+ raise DistlibException('username and password must be set')
102
+ pm = HTTPPasswordMgr()
103
+ _, netloc, _, _, _, _ = urlparse(self.url)
104
+ pm.add_password(self.realm, netloc, self.username, self.password)
105
+ self.password_handler = HTTPBasicAuthHandler(pm)
106
+
107
+ def register(self, metadata):
108
+ """
109
+ Register a distribution on PyPI, using the provided metadata.
110
+
111
+ :param metadata: A :class:`Metadata` instance defining at least a name
112
+ and version number for the distribution to be
113
+ registered.
114
+ :return: The HTTP response received from PyPI upon submission of the
115
+ request.
116
+ """
117
+ self.check_credentials()
118
+ metadata.validate()
119
+ d = metadata.todict()
120
+ d[':action'] = 'verify'
121
+ request = self.encode_request(d.items(), [])
122
+ response = self.send_request(request)
123
+ d[':action'] = 'submit'
124
+ request = self.encode_request(d.items(), [])
125
+ return self.send_request(request)
126
+
127
+ def _reader(self, name, stream, outbuf):
128
+ """
129
+ Thread runner for reading lines from a subprocess into a buffer.
130
+
131
+ :param name: The logical name of the stream (used for logging only).
132
+ :param stream: The stream to read from. This will typically be a pipe
133
+ connected to the output stream of a subprocess.
134
+ :param outbuf: The list to append the read lines to.
135
+ """
136
+ while True:
137
+ s = stream.readline()
138
+ if not s:
139
+ break
140
+ s = s.decode('utf-8').rstrip()
141
+ outbuf.append(s)
142
+ logger.debug('%s: %s', name, s)
143
+ stream.close()
144
+
145
+ def get_sign_command(self, filename, signer, sign_password,
146
+ keystore=None):
147
+ """
148
+ Return a suitable command for signing a file.
149
+
150
+ :param filename: The pathname to the file to be signed.
151
+ :param signer: The identifier of the signer of the file.
152
+ :param sign_password: The passphrase for the signer's
153
+ private key used for signing.
154
+ :param keystore: The path to a directory which contains the keys
155
+ used in signing. If not specified, the
156
+ instance's ``gpg_home`` attribute is used instead.
157
+ :return: The signing command as a list suitable to be
158
+ passed to :class:`subprocess.Popen`.
159
+ """
160
+ cmd = [self.gpg, '--status-fd', '2', '--no-tty']
161
+ if keystore is None:
162
+ keystore = self.gpg_home
163
+ if keystore:
164
+ cmd.extend(['--homedir', keystore])
165
+ if sign_password is not None:
166
+ cmd.extend(['--batch', '--passphrase-fd', '0'])
167
+ td = tempfile.mkdtemp()
168
+ sf = os.path.join(td, os.path.basename(filename) + '.asc')
169
+ cmd.extend(['--detach-sign', '--armor', '--local-user',
170
+ signer, '--output', sf, filename])
171
+ logger.debug('invoking: %s', ' '.join(cmd))
172
+ return cmd, sf
173
+
174
+ def run_command(self, cmd, input_data=None):
175
+ """
176
+ Run a command in a child process, passing it any input data specified.
177
+
178
+ :param cmd: The command to run.
179
+ :param input_data: If specified, this must be a byte string containing
180
+ data to be sent to the child process.
181
+ :return: A tuple consisting of the subprocess' exit code, a list of
182
+ lines read from the subprocess' ``stdout``, and a list of
183
+ lines read from the subprocess' ``stderr``.
184
+ """
185
+ kwargs = {
186
+ 'stdout': subprocess.PIPE,
187
+ 'stderr': subprocess.PIPE,
188
+ }
189
+ if input_data is not None:
190
+ kwargs['stdin'] = subprocess.PIPE
191
+ stdout = []
192
+ stderr = []
193
+ p = subprocess.Popen(cmd, **kwargs)
194
+ # We don't use communicate() here because we may need to
195
+ # get clever with interacting with the command
196
+ t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
197
+ t1.start()
198
+ t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
199
+ t2.start()
200
+ if input_data is not None:
201
+ p.stdin.write(input_data)
202
+ p.stdin.close()
203
+
204
+ p.wait()
205
+ t1.join()
206
+ t2.join()
207
+ return p.returncode, stdout, stderr
208
+
209
+ def sign_file(self, filename, signer, sign_password, keystore=None):
210
+ """
211
+ Sign a file.
212
+
213
+ :param filename: The pathname to the file to be signed.
214
+ :param signer: The identifier of the signer of the file.
215
+ :param sign_password: The passphrase for the signer's
216
+ private key used for signing.
217
+ :param keystore: The path to a directory which contains the keys
218
+ used in signing. If not specified, the instance's
219
+ ``gpg_home`` attribute is used instead.
220
+ :return: The absolute pathname of the file where the signature is
221
+ stored.
222
+ """
223
+ cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
224
+ keystore)
225
+ rc, stdout, stderr = self.run_command(cmd,
226
+ sign_password.encode('utf-8'))
227
+ if rc != 0:
228
+ raise DistlibException('sign command failed with error '
229
+ 'code %s' % rc)
230
+ return sig_file
231
+
232
+ def upload_file(self, metadata, filename, signer=None, sign_password=None,
233
+ filetype='sdist', pyversion='source', keystore=None):
234
+ """
235
+ Upload a release file to the index.
236
+
237
+ :param metadata: A :class:`Metadata` instance defining at least a name
238
+ and version number for the file to be uploaded.
239
+ :param filename: The pathname of the file to be uploaded.
240
+ :param signer: The identifier of the signer of the file.
241
+ :param sign_password: The passphrase for the signer's
242
+ private key used for signing.
243
+ :param filetype: The type of the file being uploaded. This is the
244
+ distutils command which produced that file, e.g.
245
+ ``sdist`` or ``bdist_wheel``.
246
+ :param pyversion: The version of Python which the release relates
247
+ to. For code compatible with any Python, this would
248
+ be ``source``, otherwise it would be e.g. ``3.2``.
249
+ :param keystore: The path to a directory which contains the keys
250
+ used in signing. If not specified, the instance's
251
+ ``gpg_home`` attribute is used instead.
252
+ :return: The HTTP response received from PyPI upon submission of the
253
+ request.
254
+ """
255
+ self.check_credentials()
256
+ if not os.path.exists(filename):
257
+ raise DistlibException('not found: %s' % filename)
258
+ metadata.validate()
259
+ d = metadata.todict()
260
+ sig_file = None
261
+ if signer:
262
+ if not self.gpg:
263
+ logger.warning('no signing program available - not signed')
264
+ else:
265
+ sig_file = self.sign_file(filename, signer, sign_password,
266
+ keystore)
267
+ with open(filename, 'rb') as f:
268
+ file_data = f.read()
269
+ md5_digest = hashlib.md5(file_data).hexdigest()
270
+ sha256_digest = hashlib.sha256(file_data).hexdigest()
271
+ d.update({
272
+ ':action': 'file_upload',
273
+ 'protocol_version': '1',
274
+ 'filetype': filetype,
275
+ 'pyversion': pyversion,
276
+ 'md5_digest': md5_digest,
277
+ 'sha256_digest': sha256_digest,
278
+ })
279
+ files = [('content', os.path.basename(filename), file_data)]
280
+ if sig_file:
281
+ with open(sig_file, 'rb') as f:
282
+ sig_data = f.read()
283
+ files.append(('gpg_signature', os.path.basename(sig_file),
284
+ sig_data))
285
+ shutil.rmtree(os.path.dirname(sig_file))
286
+ request = self.encode_request(d.items(), files)
287
+ return self.send_request(request)
288
+
289
+ def upload_documentation(self, metadata, doc_dir):
290
+ """
291
+ Upload documentation to the index.
292
+
293
+ :param metadata: A :class:`Metadata` instance defining at least a name
294
+ and version number for the documentation to be
295
+ uploaded.
296
+ :param doc_dir: The pathname of the directory which contains the
297
+ documentation. This should be the directory that
298
+ contains the ``index.html`` for the documentation.
299
+ :return: The HTTP response received from PyPI upon submission of the
300
+ request.
301
+ """
302
+ self.check_credentials()
303
+ if not os.path.isdir(doc_dir):
304
+ raise DistlibException('not a directory: %r' % doc_dir)
305
+ fn = os.path.join(doc_dir, 'index.html')
306
+ if not os.path.exists(fn):
307
+ raise DistlibException('not found: %r' % fn)
308
+ metadata.validate()
309
+ name, version = metadata.name, metadata.version
310
+ zip_data = zip_dir(doc_dir).getvalue()
311
+ fields = [(':action', 'doc_upload'),
312
+ ('name', name), ('version', version)]
313
+ files = [('content', name, zip_data)]
314
+ request = self.encode_request(fields, files)
315
+ return self.send_request(request)
316
+
317
+ def get_verify_command(self, signature_filename, data_filename,
318
+ keystore=None):
319
+ """
320
+ Return a suitable command for verifying a file.
321
+
322
+ :param signature_filename: The pathname to the file containing the
323
+ signature.
324
+ :param data_filename: The pathname to the file containing the
325
+ signed data.
326
+ :param keystore: The path to a directory which contains the keys
327
+ used in verification. If not specified, the
328
+ instance's ``gpg_home`` attribute is used instead.
329
+ :return: The verifying command as a list suitable to be
330
+ passed to :class:`subprocess.Popen`.
331
+ """
332
+ cmd = [self.gpg, '--status-fd', '2', '--no-tty']
333
+ if keystore is None:
334
+ keystore = self.gpg_home
335
+ if keystore:
336
+ cmd.extend(['--homedir', keystore])
337
+ cmd.extend(['--verify', signature_filename, data_filename])
338
+ logger.debug('invoking: %s', ' '.join(cmd))
339
+ return cmd
340
+
341
+ def verify_signature(self, signature_filename, data_filename,
342
+ keystore=None):
343
+ """
344
+ Verify a signature for a file.
345
+
346
+ :param signature_filename: The pathname to the file containing the
347
+ signature.
348
+ :param data_filename: The pathname to the file containing the
349
+ signed data.
350
+ :param keystore: The path to a directory which contains the keys
351
+ used in verification. If not specified, the
352
+ instance's ``gpg_home`` attribute is used instead.
353
+ :return: True if the signature was verified, else False.
354
+ """
355
+ if not self.gpg:
356
+ raise DistlibException('verification unavailable because gpg '
357
+ 'unavailable')
358
+ cmd = self.get_verify_command(signature_filename, data_filename,
359
+ keystore)
360
+ rc, stdout, stderr = self.run_command(cmd)
361
+ if rc not in (0, 1):
362
+ raise DistlibException('verify command failed with error '
363
+ 'code %s' % rc)
364
+ return rc == 0
365
+
366
+ def download_file(self, url, destfile, digest=None, reporthook=None):
367
+ """
368
+ This is a convenience method for downloading a file from a URL.
369
+ Normally, this will be a file from the index, though currently
370
+ no check is made for this (i.e. a file can be downloaded from
371
+ anywhere).
372
+
373
+ The method is just like the :func:`urlretrieve` function in the
374
+ standard library, except that it allows digest computation to be
375
+ done during download and checking that the downloaded data
376
+ matches any expected value.
377
+
378
+ :param url: The URL of the file to be downloaded (assumed to be
379
+ available via an HTTP GET request).
380
+ :param destfile: The pathname where the downloaded file is to be
381
+ saved.
382
+ :param digest: If specified, this must be a (hasher, value)
383
+ tuple, where hasher is the algorithm used (e.g.
384
+ ``'md5'``) and ``value`` is the expected value.
385
+ :param reporthook: The same as for :func:`urlretrieve` in the
386
+ standard library.
387
+ """
388
+ if digest is None:
389
+ digester = None
390
+ logger.debug('No digest specified')
391
+ else:
392
+ if isinstance(digest, (list, tuple)):
393
+ hasher, digest = digest
394
+ else:
395
+ hasher = 'md5'
396
+ digester = getattr(hashlib, hasher)()
397
+ logger.debug('Digest specified: %s' % digest)
398
+ # The following code is equivalent to urlretrieve.
399
+ # We need to do it this way so that we can compute the
400
+ # digest of the file as we go.
401
+ with open(destfile, 'wb') as dfp:
402
+ # addinfourl is not a context manager on 2.x
403
+ # so we have to use try/finally
404
+ sfp = self.send_request(Request(url))
405
+ try:
406
+ headers = sfp.info()
407
+ blocksize = 8192
408
+ size = -1
409
+ read = 0
410
+ blocknum = 0
411
+ if "content-length" in headers:
412
+ size = int(headers["Content-Length"])
413
+ if reporthook:
414
+ reporthook(blocknum, blocksize, size)
415
+ while True:
416
+ block = sfp.read(blocksize)
417
+ if not block:
418
+ break
419
+ read += len(block)
420
+ dfp.write(block)
421
+ if digester:
422
+ digester.update(block)
423
+ blocknum += 1
424
+ if reporthook:
425
+ reporthook(blocknum, blocksize, size)
426
+ finally:
427
+ sfp.close()
428
+
429
+ # check that we got the whole file, if we can
430
+ if size >= 0 and read < size:
431
+ raise DistlibException(
432
+ 'retrieval incomplete: got only %d out of %d bytes'
433
+ % (read, size))
434
+ # if we have a digest, it must match.
435
+ if digester:
436
+ actual = digester.hexdigest()
437
+ if digest != actual:
438
+ raise DistlibException('%s digest mismatch for %s: expected '
439
+ '%s, got %s' % (hasher, destfile,
440
+ digest, actual))
441
+ logger.debug('Digest verified: %s', digest)
442
+
443
+ def send_request(self, req):
444
+ """
445
+ Send a standard library :class:`Request` to PyPI and return its
446
+ response.
447
+
448
+ :param req: The request to send.
449
+ :return: The HTTP response from PyPI (a standard library HTTPResponse).
450
+ """
451
+ handlers = []
452
+ if self.password_handler:
453
+ handlers.append(self.password_handler)
454
+ if self.ssl_verifier:
455
+ handlers.append(self.ssl_verifier)
456
+ opener = build_opener(*handlers)
457
+ return opener.open(req)
458
+
459
+ def encode_request(self, fields, files):
460
+ """
461
+ Encode fields and files for posting to an HTTP server.
462
+
463
+ :param fields: The fields to send as a list of (fieldname, value)
464
+ tuples.
465
+ :param files: The files to send as a list of (fieldname, filename,
466
+ file_bytes) tuple.
467
+ """
468
+ # Adapted from packaging, which in turn was adapted from
469
+ # http://code.activestate.com/recipes/146306
470
+
471
+ parts = []
472
+ boundary = self.boundary
473
+ for k, values in fields:
474
+ if not isinstance(values, (list, tuple)):
475
+ values = [values]
476
+
477
+ for v in values:
478
+ parts.extend((
479
+ b'--' + boundary,
480
+ ('Content-Disposition: form-data; name="%s"' %
481
+ k).encode('utf-8'),
482
+ b'',
483
+ v.encode('utf-8')))
484
+ for key, filename, value in files:
485
+ parts.extend((
486
+ b'--' + boundary,
487
+ ('Content-Disposition: form-data; name="%s"; filename="%s"' %
488
+ (key, filename)).encode('utf-8'),
489
+ b'',
490
+ value))
491
+
492
+ parts.extend((b'--' + boundary + b'--', b''))
493
+
494
+ body = b'\r\n'.join(parts)
495
+ ct = b'multipart/form-data; boundary=' + boundary
496
+ headers = {
497
+ 'Content-type': ct,
498
+ 'Content-length': str(len(body))
499
+ }
500
+ return Request(self.url, body, headers)
501
+
502
+ def search(self, terms, operator=None):
503
+ if isinstance(terms, string_types):
504
+ terms = {'name': terms}
505
+ rpc_proxy = ServerProxy(self.url, timeout=3.0)
506
+ try:
507
+ return rpc_proxy.search(terms, operator or 'and')
508
+ finally:
509
+ rpc_proxy('close')()
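A minimal usage sketch of the download/verify API above (an editorial illustration, not part of the vendored file; the URL and digest values are placeholders):

    from pip._vendor.distlib.index import PackageIndex

    index = PackageIndex()
    # A (hasher, value) tuple makes download_file() verify the downloaded
    # bytes; a bare string digest is treated as MD5 (see download_file above).
    index.download_file(
        'https://files.example.com/foo-1.0.tar.gz',   # placeholder URL
        'foo-1.0.tar.gz',
        digest=('sha256', '<expected-hex-digest>'),   # placeholder digest
    )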
venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py ADDED
@@ -0,0 +1,1300 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+
+import gzip
+from io import BytesIO
+import json
+import logging
+import os
+import posixpath
+import re
+try:
+    import threading
+except ImportError:  # pragma: no cover
+    import dummy_threading as threading
+import zlib
+
+from . import DistlibException
+from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
+                     queue, quote, unescape, build_opener,
+                     HTTPRedirectHandler as BaseRedirectHandler, text_type,
+                     Request, HTTPError, URLError)
+from .database import Distribution, DistributionPath, make_dist
+from .metadata import Metadata, MetadataInvalidError
+from .util import (cached_property, ensure_slash, split_filename, get_project_data,
+                   parse_requirement, parse_name_and_version, ServerProxy,
+                   normalize_name)
+from .version import get_scheme, UnsupportedVersionError
+from .wheel import Wheel, is_compatible
+
+logger = logging.getLogger(__name__)
+
+HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
+CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
+HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
+DEFAULT_INDEX = 'https://pypi.org/pypi'
+
+def get_all_distribution_names(url=None):
+    """
+    Return all distribution names known by an index.
+    :param url: The URL of the index.
+    :return: A list of all known distribution names.
+    """
+    if url is None:
+        url = DEFAULT_INDEX
+    client = ServerProxy(url, timeout=3.0)
+    try:
+        return client.list_packages()
+    finally:
+        client('close')()
+
+class RedirectHandler(BaseRedirectHandler):
+    """
+    A class to work around a bug in some Python 3.2.x releases.
+    """
+    # There's a bug in the base version for some 3.2.x
+    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
+    # returns e.g. /abc, it bails because it says the scheme ''
+    # is bogus, when actually it should use the request's
+    # URL for the scheme. See Python issue #13696.
+    def http_error_302(self, req, fp, code, msg, headers):
+        # Some servers (incorrectly) return multiple Location headers
+        # (so probably same goes for URI). Use first header.
+        newurl = None
+        for key in ('location', 'uri'):
+            if key in headers:
+                newurl = headers[key]
+                break
+        if newurl is None:  # pragma: no cover
+            return
+        urlparts = urlparse(newurl)
+        if urlparts.scheme == '':
+            newurl = urljoin(req.get_full_url(), newurl)
+            if hasattr(headers, 'replace_header'):
+                headers.replace_header(key, newurl)
+            else:
+                headers[key] = newurl
+        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
+                                                  headers)
+
+    http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+class Locator(object):
+    """
+    A base class for locators - things that locate distributions.
+    """
+    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
+    binary_extensions = ('.egg', '.exe', '.whl')
+    excluded_extensions = ('.pdf',)
+
+    # A list of tags indicating which wheels you want to match. The default
+    # value of None matches against the tags compatible with the running
+    # Python. If you want to match other values, set wheel_tags on a locator
+    # instance to a list of tuples (pyver, abi, arch) which you want to match.
+    wheel_tags = None
+
+    downloadable_extensions = source_extensions + ('.whl',)
+
+    def __init__(self, scheme='default'):
+        """
+        Initialise an instance.
+        :param scheme: Because locators look for most recent versions, they
+                       need to know the version scheme to use. This specifies
+                       the current PEP-recommended scheme - use ``'legacy'``
+                       if you need to support existing distributions on PyPI.
+        """
+        self._cache = {}
+        self.scheme = scheme
+        # Because of bugs in some of the handlers on some of the platforms,
+        # we use our own opener rather than just using urlopen.
+        self.opener = build_opener(RedirectHandler())
+        # If get_project() is called from locate(), the matcher instance
+        # is set from the requirement passed to locate(). See issue #18 for
+        # why this can be useful to know.
+        self.matcher = None
+        self.errors = queue.Queue()
+
+    def get_errors(self):
+        """
+        Return any errors which have occurred.
+        """
+        result = []
+        while not self.errors.empty():  # pragma: no cover
+            try:
+                e = self.errors.get(False)
+                result.append(e)
+            except self.errors.Empty:
+                continue
+            self.errors.task_done()
+        return result
+
+    def clear_errors(self):
+        """
+        Clear any errors which may have been logged.
+        """
+        # Just get the errors and throw them away
+        self.get_errors()
+
+    def clear_cache(self):
+        self._cache.clear()
+
+    def _get_scheme(self):
+        return self._scheme
+
+    def _set_scheme(self, value):
+        self._scheme = value
+
+    scheme = property(_get_scheme, _set_scheme)
+
+    def _get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This should be implemented in subclasses.
+
+        If called from a locate() request, self.matcher will be set to a
+        matcher for the requirement to satisfy, otherwise it will be None.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This calls _get_project to do all the work, and just implements a caching layer on top.
+        """
+        if self._cache is None:  # pragma: no cover
+            result = self._get_project(name)
+        elif name in self._cache:
+            result = self._cache[name]
+        else:
+            self.clear_errors()
+            result = self._get_project(name)
+            self._cache[name] = result
+        return result
+
+    def score_url(self, url):
+        """
+        Give a URL a score which can be used to choose preferred URLs
+        for a given project release.
+        """
+        t = urlparse(url)
+        basename = posixpath.basename(t.path)
+        compatible = True
+        is_wheel = basename.endswith('.whl')
+        is_downloadable = basename.endswith(self.downloadable_extensions)
+        if is_wheel:
+            compatible = is_compatible(Wheel(basename), self.wheel_tags)
+        return (t.scheme == 'https', 'pypi.org' in t.netloc,
+                is_downloadable, is_wheel, compatible, basename)
+
+    def prefer_url(self, url1, url2):
+        """
+        Choose one of two URLs where both are candidates for distribution
+        archives for the same version of a distribution (for example,
+        .tar.gz vs. zip).
+
+        The current implementation favours https:// URLs over http://, archives
+        from PyPI over those from other locations, wheel compatibility (if a
+        wheel) and then the archive name.
+        """
+        result = url2
+        if url1:
+            s1 = self.score_url(url1)
+            s2 = self.score_url(url2)
+            if s1 > s2:
+                result = url1
+            if result != url2:
+                logger.debug('Not replacing %r with %r', url1, url2)
+            else:
+                logger.debug('Replacing %r with %r', url1, url2)
+        return result
+
+    def split_filename(self, filename, project_name):
+        """
+        Attempt to split a filename into project name, version and Python version.
+        """
+        return split_filename(filename, project_name)
+
+    def convert_url_to_download_info(self, url, project_name):
+        """
+        See if a URL is a candidate for a download URL for a project (the URL
+        has typically been scraped from an HTML page).
+
+        If it is, a dictionary is returned with keys "name", "version",
+        "filename" and "url"; otherwise, None is returned.
+        """
+        def same_project(name1, name2):
+            return normalize_name(name1) == normalize_name(name2)
+
+        result = None
+        scheme, netloc, path, params, query, frag = urlparse(url)
+        if frag.lower().startswith('egg='):  # pragma: no cover
+            logger.debug('%s: version hint in fragment: %r',
+                         project_name, frag)
+        m = HASHER_HASH.match(frag)
+        if m:
+            algo, digest = m.groups()
+        else:
+            algo, digest = None, None
+        origpath = path
+        if path and path[-1] == '/':  # pragma: no cover
+            path = path[:-1]
+        if path.endswith('.whl'):
+            try:
+                wheel = Wheel(path)
+                if not is_compatible(wheel, self.wheel_tags):
+                    logger.debug('Wheel not compatible: %s', path)
+                else:
+                    if project_name is None:
+                        include = True
+                    else:
+                        include = same_project(wheel.name, project_name)
+                    if include:
+                        result = {
+                            'name': wheel.name,
+                            'version': wheel.version,
+                            'filename': wheel.filename,
+                            'url': urlunparse((scheme, netloc, origpath,
+                                               params, query, '')),
+                            'python-version': ', '.join(
+                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
+                        }
+            except Exception as e:  # pragma: no cover
+                logger.warning('invalid path for wheel: %s', path)
+        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
+            logger.debug('Not downloadable: %s', path)
+        else:  # downloadable extension
+            path = filename = posixpath.basename(path)
+            for ext in self.downloadable_extensions:
+                if path.endswith(ext):
+                    path = path[:-len(ext)]
+                    t = self.split_filename(path, project_name)
+                    if not t:  # pragma: no cover
+                        logger.debug('No match for project/version: %s', path)
+                    else:
+                        name, version, pyver = t
+                        if not project_name or same_project(project_name, name):
+                            result = {
+                                'name': name,
+                                'version': version,
+                                'filename': filename,
+                                'url': urlunparse((scheme, netloc, origpath,
+                                                   params, query, '')),
+                                #'packagetype': 'sdist',
+                            }
+                            if pyver:  # pragma: no cover
+                                result['python-version'] = pyver
+                    break
+        if result and algo:
+            result['%s_digest' % algo] = digest
+        return result
+
+    def _get_digest(self, info):
+        """
+        Get a digest from a dictionary by looking at a "digests" dictionary
+        or keys of the form 'algo_digest'.
+
+        Returns a 2-tuple (algo, digest) if found, else None. Currently
+        looks only for SHA256, then MD5.
+        """
+        result = None
+        if 'digests' in info:
+            digests = info['digests']
+            for algo in ('sha256', 'md5'):
+                if algo in digests:
+                    result = (algo, digests[algo])
+                    break
+        if not result:
+            for algo in ('sha256', 'md5'):
+                key = '%s_digest' % algo
+                if key in info:
+                    result = (algo, info[key])
+                    break
+        return result
+
+    def _update_version_data(self, result, info):
+        """
+        Update a result dictionary (the final result from _get_project) with a
+        dictionary for a specific version, which typically holds information
+        gleaned from a filename or URL for an archive for the distribution.
+        """
+        name = info.pop('name')
+        version = info.pop('version')
+        if version in result:
+            dist = result[version]
+            md = dist.metadata
+        else:
+            dist = make_dist(name, version, scheme=self.scheme)
+            md = dist.metadata
+        dist.digest = digest = self._get_digest(info)
+        url = info['url']
+        result['digests'][url] = digest
+        if md.source_url != info['url']:
+            md.source_url = self.prefer_url(md.source_url, url)
+        result['urls'].setdefault(version, set()).add(url)
+        dist.locator = self
+        result[version] = dist
+
+    def locate(self, requirement, prereleases=False):
+        """
+        Find the most recent distribution which matches the given
+        requirement.
+
+        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
+                            'foo (>= 1.0, < 2.0, != 1.3)'
+        :param prereleases: If ``True``, allow pre-release versions
+                            to be located. Otherwise, pre-release versions
+                            are not returned.
+        :return: A :class:`Distribution` instance, or ``None`` if no such
+                 distribution could be located.
+        """
+        result = None
+        r = parse_requirement(requirement)
+        if r is None:  # pragma: no cover
+            raise DistlibException('Not a valid requirement: %r' % requirement)
+        scheme = get_scheme(self.scheme)
+        self.matcher = matcher = scheme.matcher(r.requirement)
+        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
+        versions = self.get_project(r.name)
+        if len(versions) > 2:  # urls and digests keys are present
+            # sometimes, versions are invalid
+            slist = []
+            vcls = matcher.version_class
+            for k in versions:
+                if k in ('urls', 'digests'):
+                    continue
+                try:
+                    if not matcher.match(k):
+                        pass  # logger.debug('%s did not match %r', matcher, k)
+                    else:
+                        if prereleases or not vcls(k).is_prerelease:
+                            slist.append(k)
+                        # else:
+                        #     logger.debug('skipping pre-release '
+                        #                  'version %s of %s', k, matcher.name)
+                except Exception:  # pragma: no cover
+                    logger.warning('error matching %s with %r', matcher, k)
+                    pass  # slist.append(k)
+            if len(slist) > 1:
+                slist = sorted(slist, key=scheme.key)
+            if slist:
+                logger.debug('sorted list: %s', slist)
+                version = slist[-1]
+                result = versions[version]
+        if result:
+            if r.extras:
+                result.extras = r.extras
+            result.download_urls = versions.get('urls', {}).get(version, set())
+            d = {}
+            sd = versions.get('digests', {})
+            for url in result.download_urls:
+                if url in sd:  # pragma: no cover
+                    d[url] = sd[url]
+            result.digests = d
+        self.matcher = None
+        return result
+
+
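# ---------------------------------------------------------------------------
# A minimal sketch (not from the vendored module): the Locator base class
# above leaves _get_project() and get_distribution_names() to subclasses.
# Assuming a plain dict of known releases mapping name -> {version: url}, a
# hypothetical custom locator might look like this:
#
#     from pip._vendor.distlib.database import make_dist
#     from pip._vendor.distlib.locators import Locator
#
#     class InMemoryLocator(Locator):
#         def __init__(self, data, **kwargs):
#             super(InMemoryLocator, self).__init__(**kwargs)
#             self.data = data  # {name: {version: url}}
#
#         def get_distribution_names(self):
#             return set(self.data)
#
#         def _get_project(self, name):
#             # Mirror the result shape used by the built-in locators:
#             # version -> Distribution, plus 'urls' and 'digests' maps.
#             result = {'urls': {}, 'digests': {}}
#             for version, url in self.data.get(name, {}).items():
#                 dist = make_dist(name, version, scheme=self.scheme)
#                 dist.locator = self
#                 result[version] = dist
#                 result['urls'].setdefault(version, set()).add(url)
#             return result
# ---------------------------------------------------------------------------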
+class PyPIRPCLocator(Locator):
+    """
+    This locator uses XML-RPC to locate distributions. It therefore
+    cannot be used with simple mirrors (that only mirror file content).
+    """
+    def __init__(self, url, **kwargs):
+        """
+        Initialise an instance.
+
+        :param url: The URL to use for XML-RPC.
+        :param kwargs: Passed to the superclass constructor.
+        """
+        super(PyPIRPCLocator, self).__init__(**kwargs)
+        self.base_url = url
+        self.client = ServerProxy(url, timeout=3.0)
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        return set(self.client.list_packages())
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        versions = self.client.package_releases(name, True)
+        for v in versions:
+            urls = self.client.release_urls(name, v)
+            data = self.client.release_data(name, v)
+            metadata = Metadata(scheme=self.scheme)
+            metadata.name = data['name']
+            metadata.version = data['version']
+            metadata.license = data.get('license')
+            metadata.keywords = data.get('keywords', [])
+            metadata.summary = data.get('summary')
+            dist = Distribution(metadata)
+            if urls:
+                info = urls[0]
+                metadata.source_url = info['url']
+                dist.digest = self._get_digest(info)
+            dist.locator = self
+            result[v] = dist
+            for info in urls:
+                url = info['url']
+                digest = self._get_digest(info)
+                result['urls'].setdefault(v, set()).add(url)
+                result['digests'][url] = digest
+        return result
+
+class PyPIJSONLocator(Locator):
+    """
+    This locator uses PyPI's JSON interface. It's very limited in functionality
+    and probably not worth using.
+    """
+    def __init__(self, url, **kwargs):
+        super(PyPIJSONLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Not available from this locator')
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        url = urljoin(self.base_url, '%s/json' % quote(name))
+        try:
+            resp = self.opener.open(url)
+            data = resp.read().decode()  # for now
+            d = json.loads(data)
+            md = Metadata(scheme=self.scheme)
+            data = d['info']
+            md.name = data['name']
+            md.version = data['version']
+            md.license = data.get('license')
+            md.keywords = data.get('keywords', [])
+            md.summary = data.get('summary')
+            dist = Distribution(md)
+            dist.locator = self
+            urls = d['urls']
+            result[md.version] = dist
+            for info in d['urls']:
+                url = info['url']
+                dist.download_urls.add(url)
+                dist.digests[url] = self._get_digest(info)
+                result['urls'].setdefault(md.version, set()).add(url)
+                result['digests'][url] = self._get_digest(info)
+            # Now get other releases
+            for version, infos in d['releases'].items():
+                if version == md.version:
+                    continue  # already done
+                omd = Metadata(scheme=self.scheme)
+                omd.name = md.name
+                omd.version = version
+                odist = Distribution(omd)
+                odist.locator = self
+                result[version] = odist
+                for info in infos:
+                    url = info['url']
+                    odist.download_urls.add(url)
+                    odist.digests[url] = self._get_digest(info)
+                    result['urls'].setdefault(version, set()).add(url)
+                    result['digests'][url] = self._get_digest(info)
+            # for info in urls:
+            #     md.source_url = info['url']
+            #     dist.digest = self._get_digest(info)
+            #     dist.locator = self
+            # for info in urls:
+            #     url = info['url']
+            #     result['urls'].setdefault(md.version, set()).add(url)
+            #     result['digests'][url] = self._get_digest(info)
+        except Exception as e:
+            self.errors.put(text_type(e))
+            logger.exception('JSON fetch failed: %s', e)
+        return result
+
+
+class Page(object):
+    """
+    This class represents a scraped HTML page.
+    """
+    # The following slightly hairy-looking regex just looks for the contents of
+    # an anchor link, which has an attribute "href" either immediately preceded
+    # or immediately followed by a "rel" attribute. The attribute values can be
+    # declared with double quotes, single quotes or no quotes - which leads to
+    # the length of the expression.
+    _href = re.compile("""
+(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
+href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
+(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
+""", re.I | re.S | re.X)
+    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
+
+    def __init__(self, data, url):
+        """
+        Initialise an instance with the Unicode page contents and the URL they
+        came from.
+        """
+        self.data = data
+        self.base_url = self.url = url
+        m = self._base.search(self.data)
+        if m:
+            self.base_url = m.group(1)
+
+    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
+
+    @cached_property
+    def links(self):
+        """
+        Return the URLs of all the links on a page together with information
+        about their "rel" attribute, for determining which ones to treat as
+        downloads and which ones to queue for further scraping.
+        """
+        def clean(url):
+            "Tidy up a URL."
+            scheme, netloc, path, params, query, frag = urlparse(url)
+            return urlunparse((scheme, netloc, quote(path),
+                               params, query, frag))
+
+        result = set()
+        for match in self._href.finditer(self.data):
+            d = match.groupdict('')
+            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+                   d['rel4'] or d['rel5'] or d['rel6'])
+            url = d['url1'] or d['url2'] or d['url3']
+            url = urljoin(self.base_url, url)
+            url = unescape(url)
+            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+            result.add((url, rel))
+        # We sort the result, hoping to bring the most recent versions
+        # to the front
+        result = sorted(result, key=lambda t: t[0], reverse=True)
+        return result
+
+
+class SimpleScrapingLocator(Locator):
+    """
+    A locator which scrapes HTML pages to locate downloads for a distribution.
+    This runs multiple threads to do the I/O; performance is at least as good
+    as pip's PackageFinder, which works in an analogous fashion.
+    """
+
+    # These are used to deal with various Content-Encoding schemes.
+    decoders = {
+        'deflate': zlib.decompress,
+        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+        'none': lambda b: b,
+    }
+
+    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+        """
+        Initialise an instance.
+        :param url: The root URL to use for scraping.
+        :param timeout: The timeout, in seconds, to be applied to requests.
+                        This defaults to ``None`` (no timeout specified).
+        :param num_workers: The number of worker threads you want to do I/O.
+                            This defaults to 10.
+        :param kwargs: Passed to the superclass.
+        """
+        super(SimpleScrapingLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+        self.timeout = timeout
+        self._page_cache = {}
+        self._seen = set()
+        self._to_fetch = queue.Queue()
+        self._bad_hosts = set()
+        self.skip_externals = False
+        self.num_workers = num_workers
+        self._lock = threading.RLock()
+        # See issue #45: we need to be resilient when the locator is used
+        # in a thread, e.g. with concurrent.futures. We can't use self._lock
+        # as it is for coordinating our internal threads - the ones created
+        # in _prepare_threads.
+        self._gplock = threading.RLock()
+        self.platform_check = False  # See issue #112
+
+    def _prepare_threads(self):
+        """
+        Threads are created only when get_project is called, and terminate
+        before it returns. They are there primarily to parallelise I/O (i.e.
+        fetching web pages).
+        """
+        self._threads = []
+        for i in range(self.num_workers):
+            t = threading.Thread(target=self._fetch)
+            t.daemon = True
+            t.start()
+            self._threads.append(t)
+
+    def _wait_threads(self):
+        """
+        Tell all the threads to terminate (by sending a sentinel value) and
+        wait for them to do so.
+        """
+        # Note that you need two loops, since you can't say which
+        # thread will get each sentinel
+        for t in self._threads:
+            self._to_fetch.put(None)  # sentinel
+        for t in self._threads:
+            t.join()
+        self._threads = []
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        with self._gplock:
+            self.result = result
+            self.project_name = name
+            url = urljoin(self.base_url, '%s/' % quote(name))
+            self._seen.clear()
+            self._page_cache.clear()
+            self._prepare_threads()
+            try:
+                logger.debug('Queueing %s', url)
+                self._to_fetch.put(url)
+                self._to_fetch.join()
+            finally:
+                self._wait_threads()
+            del self.result
+        return result
+
+    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
+                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)
+
+    def _is_platform_dependent(self, url):
+        """
+        Does a URL refer to a platform-specific download?
+        """
+        return self.platform_dependent.search(url)
+
+    def _process_download(self, url):
+        """
+        See if a URL is a suitable download for a project.
+
+        If it is, register information in the result dictionary (for
+        _get_project) about the specific version it's for.
+
+        Note that the return value isn't actually used other than as a boolean
+        value.
+        """
+        if self.platform_check and self._is_platform_dependent(url):
+            info = None
+        else:
+            info = self.convert_url_to_download_info(url, self.project_name)
+        logger.debug('process_download: %s -> %s', url, info)
+        if info:
+            with self._lock:  # needed because self.result is shared
+                self._update_version_data(self.result, info)
+        return info
+
+    def _should_queue(self, link, referrer, rel):
+        """
+        Determine whether a link URL from a referring page and with a
+        particular "rel" attribute should be queued for scraping.
+        """
+        scheme, netloc, path, _, _, _ = urlparse(link)
+        if path.endswith(self.source_extensions + self.binary_extensions +
+                         self.excluded_extensions):
+            result = False
+        elif self.skip_externals and not link.startswith(self.base_url):
+            result = False
+        elif not referrer.startswith(self.base_url):
+            result = False
+        elif rel not in ('homepage', 'download'):
+            result = False
+        elif scheme not in ('http', 'https', 'ftp'):
+            result = False
+        elif self._is_platform_dependent(link):
+            result = False
+        else:
+            host = netloc.split(':', 1)[0]
+            if host.lower() == 'localhost':
+                result = False
+            else:
+                result = True
+        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
+                     referrer, result)
+        return result
+
+    def _fetch(self):
+        """
+        Get a URL to fetch from the work queue, get the HTML page, examine its
+        links for download candidates and candidates for further scraping.
+
+        This is a handy method to run in a thread.
+        """
+        while True:
+            url = self._to_fetch.get()
+            try:
+                if url:
+                    page = self.get_page(url)
+                    if page is None:  # e.g. after an error
+                        continue
+                    for link, rel in page.links:
+                        if link not in self._seen:
+                            try:
+                                self._seen.add(link)
+                                if (not self._process_download(link) and
+                                        self._should_queue(link, url, rel)):
+                                    logger.debug('Queueing %s from %s', link, url)
+                                    self._to_fetch.put(link)
+                            except MetadataInvalidError:  # e.g. invalid versions
+                                pass
+            except Exception as e:  # pragma: no cover
+                self.errors.put(text_type(e))
+            finally:
+                # always do this, to avoid hangs :-)
+                self._to_fetch.task_done()
+            if not url:
+                #logger.debug('Sentinel seen, quitting.')
+                break
+
+    def get_page(self, url):
+        """
+        Get the HTML for a URL, possibly from an in-memory cache.
+
+        XXX TODO Note: this cache is never actually cleared. It's assumed that
+        the data won't get stale over the lifetime of a locator instance (not
+        necessarily true for the default_locator).
+        """
+        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
+        scheme, netloc, path, _, _, _ = urlparse(url)
+        if scheme == 'file' and os.path.isdir(url2pathname(path)):
+            url = urljoin(ensure_slash(url), 'index.html')
+
+        if url in self._page_cache:
+            result = self._page_cache[url]
+            logger.debug('Returning %s from cache: %s', url, result)
+        else:
+            host = netloc.split(':', 1)[0]
+            result = None
+            if host in self._bad_hosts:
+                logger.debug('Skipping %s due to bad host %s', url, host)
+            else:
+                req = Request(url, headers={'Accept-encoding': 'identity'})
+                try:
+                    logger.debug('Fetching %s', url)
+                    resp = self.opener.open(req, timeout=self.timeout)
+                    logger.debug('Fetched %s', url)
+                    headers = resp.info()
+                    content_type = headers.get('Content-Type', '')
+                    if HTML_CONTENT_TYPE.match(content_type):
+                        final_url = resp.geturl()
+                        data = resp.read()
+                        encoding = headers.get('Content-Encoding')
+                        if encoding:
+                            decoder = self.decoders[encoding]  # fail if not found
+                            data = decoder(data)
+                        encoding = 'utf-8'
+                        m = CHARSET.search(content_type)
+                        if m:
+                            encoding = m.group(1)
+                        try:
+                            data = data.decode(encoding)
+                        except UnicodeError:  # pragma: no cover
+                            data = data.decode('latin-1')  # fallback
+                        result = Page(data, final_url)
+                        self._page_cache[final_url] = result
+                except HTTPError as e:
+                    if e.code != 404:
+                        logger.exception('Fetch failed: %s: %s', url, e)
+                except URLError as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                    with self._lock:
+                        self._bad_hosts.add(host)
+                except Exception as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                finally:
+                    self._page_cache[url] = result  # even if None (failure)
+        return result
+
+    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        page = self.get_page(self.base_url)
+        if not page:
+            raise DistlibException('Unable to get %s' % self.base_url)
+        for match in self._distname_re.finditer(page.data):
+            result.add(match.group(1))
+        return result
+
+class DirectoryLocator(Locator):
+    """
+    This class locates distributions in a directory tree.
+    """
+
+    def __init__(self, path, **kwargs):
+        """
+        Initialise an instance.
+        :param path: The root of the directory tree to search.
+        :param kwargs: Passed to the superclass constructor,
+                       except for:
+                       * recursive - if True (the default), subdirectories are
+                         recursed into. If False, only the top-level directory
+                         is searched.
+        """
+        self.recursive = kwargs.pop('recursive', True)
+        super(DirectoryLocator, self).__init__(**kwargs)
+        path = os.path.abspath(path)
+        if not os.path.isdir(path):  # pragma: no cover
+            raise DistlibException('Not a directory: %r' % path)
+        self.base_dir = path
+
+    def should_include(self, filename, parent):
+        """
+        Should a filename be considered as a candidate for a distribution
+        archive? As well as the filename, the directory which contains it
+        is provided, though not used by the current implementation.
+        """
+        return filename.endswith(self.downloadable_extensions)
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        for root, dirs, files in os.walk(self.base_dir):
+            for fn in files:
+                if self.should_include(fn, root):
+                    fn = os.path.join(root, fn)
+                    url = urlunparse(('file', '',
+                                      pathname2url(os.path.abspath(fn)),
+                                      '', '', ''))
+                    info = self.convert_url_to_download_info(url, name)
+                    if info:
+                        self._update_version_data(result, info)
+            if not self.recursive:
+                break
+        return result
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        for root, dirs, files in os.walk(self.base_dir):
+            for fn in files:
+                if self.should_include(fn, root):
+                    fn = os.path.join(root, fn)
+                    url = urlunparse(('file', '',
+                                      pathname2url(os.path.abspath(fn)),
+                                      '', '', ''))
+                    info = self.convert_url_to_download_info(url, None)
+                    if info:
+                        result.add(info['name'])
+            if not self.recursive:
+                break
+        return result
+
+class JSONLocator(Locator):
+    """
+    This locator uses special extended metadata (not available on PyPI) and is
+    the basis of performant dependency resolution in distlib. Other locators
+    require archive downloads before dependencies can be determined! As you
+    might imagine, that can be slow.
+    """
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Not available from this locator')
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        data = get_project_data(name)
+        if data:
+            for info in data.get('files', []):
+                if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
+                    continue
+                # We don't store summary in project metadata as it makes
+                # the data bigger for no benefit during dependency
+                # resolution
+                dist = make_dist(data['name'], info['version'],
+                                 summary=data.get('summary',
+                                                  'Placeholder for summary'),
+                                 scheme=self.scheme)
+                md = dist.metadata
+                md.source_url = info['url']
+                # TODO SHA256 digest
+                if 'digest' in info and info['digest']:
+                    dist.digest = ('md5', info['digest'])
+                md.dependencies = info.get('requirements', {})
+                dist.exports = info.get('exports', {})
+                result[dist.version] = dist
+                result['urls'].setdefault(dist.version, set()).add(info['url'])
+        return result
+
+class DistPathLocator(Locator):
+    """
+    This locator finds installed distributions in a path. It can be useful for
+    adding to an :class:`AggregatingLocator`.
+    """
+    def __init__(self, distpath, **kwargs):
+        """
+        Initialise an instance.
+
+        :param distpath: A :class:`DistributionPath` instance to search.
+        """
+        super(DistPathLocator, self).__init__(**kwargs)
+        assert isinstance(distpath, DistributionPath)
+        self.distpath = distpath
+
+    def _get_project(self, name):
+        dist = self.distpath.get_distribution(name)
+        if dist is None:
+            result = {'urls': {}, 'digests': {}}
+        else:
+            result = {
+                dist.version: dist,
+                'urls': {dist.version: set([dist.source_url])},
+                'digests': {dist.version: set([None])}
+            }
+        return result
+
+
+class AggregatingLocator(Locator):
+    """
+    This class allows you to chain and/or merge a list of locators.
+    """
+    def __init__(self, *locators, **kwargs):
+        """
+        Initialise an instance.
+
+        :param locators: The list of locators to search.
+        :param kwargs: Passed to the superclass constructor,
+                       except for:
+                       * merge - if False (the default), the first successful
+                         search from any of the locators is returned. If True,
+                         the results from all locators are merged (this can be
+                         slow).
+        """
+        self.merge = kwargs.pop('merge', False)
+        self.locators = locators
+        super(AggregatingLocator, self).__init__(**kwargs)
+
+    def clear_cache(self):
+        super(AggregatingLocator, self).clear_cache()
+        for locator in self.locators:
+            locator.clear_cache()
+
+    def _set_scheme(self, value):
+        self._scheme = value
+        for locator in self.locators:
+            locator.scheme = value
+
+    scheme = property(Locator.scheme.fget, _set_scheme)
+
+    def _get_project(self, name):
+        result = {}
+        for locator in self.locators:
+            d = locator.get_project(name)
+            if d:
+                if self.merge:
+                    files = result.get('urls', {})
+                    digests = result.get('digests', {})
+                    # next line could overwrite result['urls'], result['digests']
+                    result.update(d)
+                    df = result.get('urls')
+                    if files and df:
+                        for k, v in files.items():
+                            if k in df:
+                                df[k] |= v
+                            else:
+                                df[k] = v
+                    dd = result.get('digests')
+                    if digests and dd:
+                        dd.update(digests)
+                else:
+                    # See issue #18. If any dists are found and we're looking
+                    # for specific constraints, we only return something if
+                    # a match is found. For example, if a DirectoryLocator
+                    # returns just foo (1.0) while we're looking for
+                    # foo (>= 2.0), we'll pretend there was nothing there so
+                    # that subsequent locators can be queried. Otherwise we
+                    # would just return foo (1.0) which would then lead to a
+                    # failure to find foo (>= 2.0), because other locators
+                    # weren't searched. Note that this only matters when
+                    # merge=False.
+                    if self.matcher is None:
+                        found = True
+                    else:
+                        found = False
+                        for k in d:
+                            if self.matcher.match(k):
+                                found = True
+                                break
+                    if found:
+                        result = d
+                        break
+        return result
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        for locator in self.locators:
+            try:
+                result |= locator.get_distribution_names()
+            except NotImplementedError:
+                pass
+        return result
+
+
+# We use a legacy scheme simply because most of the dists on PyPI use legacy
+# versions which don't conform to PEP 426 / PEP 440.
+default_locator = AggregatingLocator(
+    JSONLocator(),
+    SimpleScrapingLocator('https://pypi.org/simple/',
+                          timeout=3.0),
+    scheme='legacy')
+
+locate = default_locator.locate
+
+
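# A hedged usage sketch of the module-level convenience just defined (not
# from the vendored module; the requirement string is illustrative only):
#
#     from pip._vendor.distlib.locators import locate
#
#     dist = locate('requests (>= 2.0)')
#     if dist is not None:
#         # locate() populates download_urls/digests on the Distribution.
#         print(dist.name_and_version, sorted(dist.download_urls))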
+class DependencyFinder(object):
+    """
+    Locate dependencies for distributions.
+    """
+
+    def __init__(self, locator=None):
+        """
+        Initialise an instance, using the specified locator
+        to locate distributions.
+        """
+        self.locator = locator or default_locator
+        self.scheme = get_scheme(self.locator.scheme)
+
+    def add_distribution(self, dist):
+        """
+        Add a distribution to the finder. This will update internal information
+        about who provides what.
+        :param dist: The distribution to add.
+        """
+        logger.debug('adding distribution %s', dist)
+        name = dist.key
+        self.dists_by_name[name] = dist
+        self.dists[(name, dist.version)] = dist
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
+            self.provided.setdefault(name, set()).add((version, dist))
+
+    def remove_distribution(self, dist):
+        """
+        Remove a distribution from the finder. This will update internal
+        information about who provides what.
+        :param dist: The distribution to remove.
+        """
+        logger.debug('removing distribution %s', dist)
+        name = dist.key
+        del self.dists_by_name[name]
+        del self.dists[(name, dist.version)]
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
+            s = self.provided[name]
+            s.remove((version, dist))
+            if not s:
+                del self.provided[name]
+
+    def get_matcher(self, reqt):
+        """
+        Get a version matcher for a requirement.
+        :param reqt: The requirement
+        :type reqt: str
+        :return: A version matcher (an instance of
+                 :class:`distlib.version.Matcher`).
+        """
+        try:
+            matcher = self.scheme.matcher(reqt)
+        except UnsupportedVersionError:  # pragma: no cover
+            # XXX compat-mode if cannot read the version
+            name = reqt.split()[0]
+            matcher = self.scheme.matcher(name)
+        return matcher
+
+    def find_providers(self, reqt):
+        """
+        Find the distributions which can fulfill a requirement.
+
+        :param reqt: The requirement.
+        :type reqt: str
+        :return: A set of distributions which can fulfill the requirement.
+        """
+        matcher = self.get_matcher(reqt)
+        name = matcher.key  # case-insensitive
+        result = set()
+        provided = self.provided
+        if name in provided:
+            for version, provider in provided[name]:
+                try:
+                    match = matcher.match(version)
+                except UnsupportedVersionError:
+                    match = False
+
+                if match:
+                    result.add(provider)
+                    break
+        return result
+
+    def try_to_replace(self, provider, other, problems):
+        """
+        Attempt to replace one provider with another. This is typically used
+        when resolving dependencies from multiple sources, e.g. A requires
+        (B >= 1.0) while C requires (B >= 1.1).
+
+        For successful replacement, ``provider`` must meet all the requirements
+        which ``other`` fulfills.
+
+        :param provider: The provider we are trying to replace with.
+        :param other: The provider we're trying to replace.
+        :param problems: If False is returned, this will contain what
+                         problems prevented replacement. This is currently
+                         a tuple of the literal string 'cantreplace',
+                         ``provider``, ``other`` and the set of requirements
+                         that ``provider`` couldn't fulfill.
+        :return: True if we can replace ``other`` with ``provider``, else
+                 False.
+        """
+        rlist = self.reqts[other]
+        unmatched = set()
+        for s in rlist:
+            matcher = self.get_matcher(s)
+            if not matcher.match(provider.version):
+                unmatched.add(s)
+        if unmatched:
+            # can't replace other with provider
+            problems.add(('cantreplace', provider, other,
+                          frozenset(unmatched)))
+            result = False
+        else:
+            # can replace other with provider
+            self.remove_distribution(other)
+            del self.reqts[other]
+            for s in rlist:
+                self.reqts.setdefault(provider, set()).add(s)
+            self.add_distribution(provider)
+            result = True
+        return result
+
+    def find(self, requirement, meta_extras=None, prereleases=False):
+        """
+        Find a distribution and all distributions it depends on.
+
+        :param requirement: The requirement specifying the distribution to
+                            find, or a Distribution instance.
+        :param meta_extras: A list of meta extras such as :test:, :build: and
+                            so on.
+        :param prereleases: If ``True``, allow pre-release versions to be
+                            returned - otherwise, don't return prereleases
+                            unless they're all that's available.
+
+        Return a set of :class:`Distribution` instances and a set of
+        problems.
+
+        The distributions returned should be such that they have the
+        :attr:`required` attribute set to ``True`` if they were
+        from the ``requirement`` passed to ``find()``, and they have the
+        :attr:`build_time_dependency` attribute set to ``True`` unless they
+        are post-installation dependencies of the ``requirement``.
+
+        Each problem is a tuple consisting of the string ``'unsatisfied'``
+        and the requirement which couldn't be satisfied by any distribution
+        known to the locator.
+        """
+
+        self.provided = {}
+        self.dists = {}
+        self.dists_by_name = {}
+        self.reqts = {}
+
+        meta_extras = set(meta_extras or [])
+        if ':*:' in meta_extras:
+            meta_extras.remove(':*:')
+            # :meta: and :run: are implicitly included
+            meta_extras |= set([':test:', ':build:', ':dev:'])
+
+        if isinstance(requirement, Distribution):
+            dist = odist = requirement
+            logger.debug('passed %s as requirement', odist)
+        else:
+            dist = odist = self.locator.locate(requirement,
+                                               prereleases=prereleases)
+            if dist is None:
+                raise DistlibException('Unable to locate %r' % requirement)
+            logger.debug('located %s', odist)
+        dist.requested = True
+        problems = set()
+        todo = set([dist])
+        install_dists = set([odist])
+        while todo:
+            dist = todo.pop()
+            name = dist.key  # case-insensitive
+            if name not in self.dists_by_name:
+                self.add_distribution(dist)
+            else:
+                #import pdb; pdb.set_trace()
+                other = self.dists_by_name[name]
+                if other != dist:
+                    self.try_to_replace(dist, other, problems)
+
+            ireqts = dist.run_requires | dist.meta_requires
+            sreqts = dist.build_requires
+            ereqts = set()
+            if meta_extras and dist in install_dists:
+                for key in ('test', 'build', 'dev'):
+                    e = ':%s:' % key
+                    if e in meta_extras:
+                        ereqts |= getattr(dist, '%s_requires' % key)
+            all_reqts = ireqts | sreqts | ereqts
+            for r in all_reqts:
+                providers = self.find_providers(r)
+                if not providers:
+                    logger.debug('No providers found for %r', r)
+                    provider = self.locator.locate(r, prereleases=prereleases)
+                    # If no provider is found and we didn't consider
+                    # prereleases, consider them now.
+                    if provider is None and not prereleases:
+                        provider = self.locator.locate(r, prereleases=True)
+                    if provider is None:
+                        logger.debug('Cannot satisfy %r', r)
+                        problems.add(('unsatisfied', r))
+                    else:
+                        n, v = provider.key, provider.version
+                        if (n, v) not in self.dists:
+                            todo.add(provider)
+                        providers.add(provider)
+                        if r in ireqts and dist in install_dists:
+                            install_dists.add(provider)
+                            logger.debug('Adding %s to install_dists',
+                                         provider.name_and_version)
+                for p in providers:
+                    name = p.key
+                    if name not in self.dists_by_name:
+                        self.reqts.setdefault(p, set()).add(r)
+                    else:
+                        other = self.dists_by_name[name]
+                        if other != p:
+                            # see if other can be replaced by p
+                            self.try_to_replace(p, other, problems)
+
+        dists = set(self.dists.values())
+        for dist in dists:
+            dist.build_time_dependency = dist not in install_dists
+            if dist.build_time_dependency:
+                logger.debug('%s is a build-time dependency only.',
+                             dist.name_and_version)
+        logger.debug('find done for %s', odist)
+        return dists, problems
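A short usage sketch of DependencyFinder before the next file (editorial illustration, not part of the vendored file; the requirement pin is a placeholder):

    from pip._vendor.distlib.locators import DependencyFinder, default_locator

    finder = DependencyFinder(default_locator)
    dists, problems = finder.find('flask (== 2.0.0)')  # placeholder pin
    for dist in sorted(dists, key=lambda d: d.key):
        tag = ' (build-time only)' if dist.build_time_dependency else ''
        print(dist.name_and_version + tag)
    if problems:
        print('unresolved:', problems)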
venv/lib/python3.10/site-packages/pip/_vendor/distlib/manifest.py ADDED
@@ -0,0 +1,393 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2012-2013 Python Software Foundation.
4
+ # See LICENSE.txt and CONTRIBUTORS.txt.
5
+ #
6
+ """
7
+ Class representing the list of files in a distribution.
8
+
9
+ Equivalent to distutils.filelist, but fixes some problems.
10
+ """
11
+ import fnmatch
12
+ import logging
13
+ import os
14
+ import re
15
+ import sys
16
+
17
+ from . import DistlibException
18
+ from .compat import fsdecode
19
+ from .util import convert_path
20
+
21
+
22
+ __all__ = ['Manifest']
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+ # a \ followed by some spaces + EOL
27
+ _COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M)
28
+ _COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
29
+
30
+ #
31
+ # Due to the different results returned by fnmatch.translate, we need
32
+ # to do slightly different processing for Python 2.7 and 3.2 ... this needed
33
+ # to be brought in for Python 3.6 onwards.
34
+ #
35
+ _PYTHON_VERSION = sys.version_info[:2]
36
+
37
+ class Manifest(object):
38
+ """A list of files built by on exploring the filesystem and filtered by
39
+ applying various patterns to what we find there.
40
+ """
41
+
42
+ def __init__(self, base=None):
43
+ """
44
+ Initialise an instance.
45
+
46
+ :param base: The base directory to explore under.
47
+ """
48
+ self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
49
+ self.prefix = self.base + os.sep
50
+ self.allfiles = None
51
+ self.files = set()
52
+
53
+ #
54
+ # Public API
55
+ #
56
+
57
+ def findall(self):
58
+ """Find all files under the base and set ``allfiles`` to the absolute
59
+ pathnames of files found.
60
+ """
61
+ from stat import S_ISREG, S_ISDIR, S_ISLNK
62
+
63
+ self.allfiles = allfiles = []
64
+ root = self.base
65
+ stack = [root]
66
+ pop = stack.pop
67
+ push = stack.append
68
+
69
+ while stack:
70
+ root = pop()
71
+ names = os.listdir(root)
72
+
73
+ for name in names:
74
+ fullname = os.path.join(root, name)
75
+
76
+ # Avoid excess stat calls -- just one will do, thank you!
77
+ stat = os.stat(fullname)
78
+ mode = stat.st_mode
79
+ if S_ISREG(mode):
80
+ allfiles.append(fsdecode(fullname))
81
+ elif S_ISDIR(mode) and not S_ISLNK(mode):
82
+ push(fullname)
83
+
84
+ def add(self, item):
85
+ """
86
+ Add a file to the manifest.
87
+
88
+ :param item: The pathname to add. This can be relative to the base.
89
+ """
90
+ if not item.startswith(self.prefix):
91
+ item = os.path.join(self.base, item)
92
+ self.files.add(os.path.normpath(item))
93
+
94
+ def add_many(self, items):
95
+ """
96
+ Add a list of files to the manifest.
97
+
98
+ :param items: The pathnames to add. These can be relative to the base.
99
+ """
100
+ for item in items:
101
+ self.add(item)
102
+
103
+ def sorted(self, wantdirs=False):
104
+ """
105
+ Return sorted files in directory order
106
+ """
107
+
108
+ def add_dir(dirs, d):
109
+ dirs.add(d)
110
+ logger.debug('add_dir added %s', d)
111
+ if d != self.base:
112
+ parent, _ = os.path.split(d)
113
+ assert parent not in ('', '/')
114
+ add_dir(dirs, parent)
115
+
116
+ result = set(self.files) # make a copy!
117
+ if wantdirs:
118
+ dirs = set()
119
+ for f in result:
120
+ add_dir(dirs, os.path.dirname(f))
121
+ result |= dirs
122
+ return [os.path.join(*path_tuple) for path_tuple in
123
+ sorted(os.path.split(path) for path in result)]
124
+
125
+ def clear(self):
126
+ """Clear all collected files."""
127
+ self.files = set()
128
+ self.allfiles = []
129
+
130
+ def process_directive(self, directive):
131
+ """
132
+ Process a directive which either adds some files from ``allfiles`` to
133
+ ``files``, or removes some files from ``files``.
134
+
135
+ :param directive: The directive to process. This should be in a format
136
+ compatible with distutils ``MANIFEST.in`` files:
137
+
138
+ http://docs.python.org/distutils/sourcedist.html#commands
139
+ """
140
+ # Parse the line: split it up, make sure the right number of words
141
+ # is there, and return the relevant words. 'action' is always
142
+ # defined: it's the first word of the line. Which of the other
143
+ # three are defined depends on the action; it'll be either
144
+ # patterns, (dir and patterns), or (dirpattern).
145
+ action, patterns, thedir, dirpattern = self._parse_directive(directive)
146
+
147
+ # OK, now we know that the action is valid and we have the
148
+ # right number of words on the line for that action -- so we
149
+ # can proceed with minimal error-checking.
150
+ if action == 'include':
151
+ for pattern in patterns:
152
+ if not self._include_pattern(pattern, anchor=True):
153
+ logger.warning('no files found matching %r', pattern)
154
+
155
+ elif action == 'exclude':
156
+ for pattern in patterns:
157
+ found = self._exclude_pattern(pattern, anchor=True)
158
+ #if not found:
159
+ # logger.warning('no previously-included files '
160
+ # 'found matching %r', pattern)
161
+
162
+ elif action == 'global-include':
163
+ for pattern in patterns:
164
+ if not self._include_pattern(pattern, anchor=False):
165
+ logger.warning('no files found matching %r '
166
+ 'anywhere in distribution', pattern)
167
+
168
+ elif action == 'global-exclude':
169
+ for pattern in patterns:
170
+ found = self._exclude_pattern(pattern, anchor=False)
171
+ #if not found:
172
+ # logger.warning('no previously-included files '
173
+ # 'matching %r found anywhere in '
174
+ # 'distribution', pattern)
175
+
176
+ elif action == 'recursive-include':
177
+ for pattern in patterns:
178
+ if not self._include_pattern(pattern, prefix=thedir):
179
+ logger.warning('no files found matching %r '
180
+ 'under directory %r', pattern, thedir)
181
+
182
+ elif action == 'recursive-exclude':
183
+ for pattern in patterns:
184
+ found = self._exclude_pattern(pattern, prefix=thedir)
185
+ #if not found:
186
+ # logger.warning('no previously-included files '
187
+ # 'matching %r found under directory %r',
188
+ # pattern, thedir)
189
+
190
+ elif action == 'graft':
191
+ if not self._include_pattern(None, prefix=dirpattern):
192
+ logger.warning('no directories found matching %r',
193
+ dirpattern)
194
+
195
+ elif action == 'prune':
196
+ if not self._exclude_pattern(None, prefix=dirpattern):
197
+ logger.warning('no previously-included directories found '
198
+ 'matching %r', dirpattern)
199
+ else: # pragma: no cover
200
+ # This should never happen, as it should be caught in
201
+ # _parse_directive
202
+ raise DistlibException(
203
+ 'invalid action %r' % action)
204
+
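A hedged, self-contained sketch of driving `process_directive()` with MANIFEST.in-style lines; the temporary tree below is illustrative, not from the diff:

```python
# Build a throwaway tree, then include/exclude files with directives.
import os
import tempfile
from pip._vendor.distlib.manifest import Manifest

base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'docs'))
for rel in ('setup.py', os.path.join('docs', 'index.rst'),
            os.path.join('docs', 'notes.txt')):
    open(os.path.join(base, rel), 'w').close()

m = Manifest(base)
m.process_directive('include setup.py')          # anchored at the base
m.process_directive('recursive-include docs *')  # everything under docs/
m.process_directive('global-exclude *.txt')      # removes docs/notes.txt again
print(sorted(m.files))  # absolute paths of setup.py and docs/index.rst
```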
205
+ #
206
+ # Private API
207
+ #
208
+
209
+ def _parse_directive(self, directive):
210
+ """
211
+ Validate a directive.
212
+ :param directive: The directive to validate.
213
+ :return: A tuple of action, patterns, thedir, dir_patterns
214
+ """
215
+ words = directive.split()
216
+ if len(words) == 1 and words[0] not in ('include', 'exclude',
217
+ 'global-include',
218
+ 'global-exclude',
219
+ 'recursive-include',
220
+ 'recursive-exclude',
221
+ 'graft', 'prune'):
222
+ # no action given, let's use the default 'include'
223
+ words.insert(0, 'include')
224
+
225
+ action = words[0]
226
+ patterns = thedir = dir_pattern = None
227
+
228
+ if action in ('include', 'exclude',
229
+ 'global-include', 'global-exclude'):
230
+ if len(words) < 2:
231
+ raise DistlibException(
232
+ '%r expects <pattern1> <pattern2> ...' % action)
233
+
234
+ patterns = [convert_path(word) for word in words[1:]]
235
+
236
+ elif action in ('recursive-include', 'recursive-exclude'):
237
+ if len(words) < 3:
238
+ raise DistlibException(
239
+ '%r expects <dir> <pattern1> <pattern2> ...' % action)
240
+
241
+ thedir = convert_path(words[1])
242
+ patterns = [convert_path(word) for word in words[2:]]
243
+
244
+ elif action in ('graft', 'prune'):
245
+ if len(words) != 2:
246
+ raise DistlibException(
247
+ '%r expects a single <dir_pattern>' % action)
248
+
249
+ dir_pattern = convert_path(words[1])
250
+
251
+ else:
252
+ raise DistlibException('unknown action %r' % action)
253
+
254
+ return action, patterns, thedir, dir_pattern
255
+
256
+ def _include_pattern(self, pattern, anchor=True, prefix=None,
257
+ is_regex=False):
258
+ """Select strings (presumably filenames) from 'self.files' that
259
+ match 'pattern', a Unix-style wildcard (glob) pattern.
260
+
261
+ Patterns are not quite the same as implemented by the 'fnmatch'
262
+ module: '*' and '?' match non-special characters, where "special"
263
+ is platform-dependent: slash on Unix; colon, slash, and backslash on
264
+ DOS/Windows; and colon on Mac OS.
265
+
266
+ If 'anchor' is true (the default), then the pattern match is more
267
+ stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
268
+ 'anchor' is false, both of these will match.
269
+
270
+ If 'prefix' is supplied, then only filenames starting with 'prefix'
271
+ (itself a pattern) and ending with 'pattern', with anything in between
272
+ them, will match. 'anchor' is ignored in this case.
273
+
274
+ If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
275
+ 'pattern' is assumed to be either a string containing a regex or a
276
+ regex object -- no translation is done, the regex is just compiled
277
+ and used as-is.
278
+
279
+ Selected strings will be added to self.files.
280
+
281
+ Return True if files are found.
282
+ """
283
+ # XXX docstring lying about what the special chars are?
284
+ found = False
285
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
286
+
287
+ # delayed loading of allfiles list
288
+ if self.allfiles is None:
289
+ self.findall()
290
+
291
+ for name in self.allfiles:
292
+ if pattern_re.search(name):
293
+ self.files.add(name)
294
+ found = True
295
+ return found
296
+
297
+ def _exclude_pattern(self, pattern, anchor=True, prefix=None,
298
+ is_regex=False):
299
+ """Remove strings (presumably filenames) from 'files' that match
300
+ 'pattern'.
301
+
302
+ Other parameters are the same as for 'include_pattern()', above.
303
+ The list 'self.files' is modified in place. Return True if files are
304
+ found.
305
+
306
+ This API is public to allow, for example, exclusion of SCM subdirs when
307
+ packaging source distributions.
308
+ """
309
+ found = False
310
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
311
+ for f in list(self.files):
312
+ if pattern_re.search(f):
313
+ self.files.remove(f)
314
+ found = True
315
+ return found
316
+
317
+ def _translate_pattern(self, pattern, anchor=True, prefix=None,
318
+ is_regex=False):
319
+ """Translate a shell-like wildcard pattern to a compiled regular
320
+ expression.
321
+
322
+ Return the compiled regex. If 'is_regex' is true,
323
+ then 'pattern' is directly compiled to a regex (if it's a string)
324
+ or just returned as-is (assumes it's a regex object).
325
+ """
326
+ if is_regex:
327
+ if isinstance(pattern, str):
328
+ return re.compile(pattern)
329
+ else:
330
+ return pattern
331
+
332
+ if _PYTHON_VERSION > (3, 2):
333
+ # ditch start and end characters
334
+ start, _, end = self._glob_to_re('_').partition('_')
335
+
336
+ if pattern:
337
+ pattern_re = self._glob_to_re(pattern)
338
+ if _PYTHON_VERSION > (3, 2):
339
+ assert pattern_re.startswith(start) and pattern_re.endswith(end)
340
+ else:
341
+ pattern_re = ''
342
+
343
+ base = re.escape(os.path.join(self.base, ''))
344
+ if prefix is not None:
345
+ # ditch end of pattern character
346
+ if _PYTHON_VERSION <= (3, 2):
347
+ empty_pattern = self._glob_to_re('')
348
+ prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
349
+ else:
350
+ prefix_re = self._glob_to_re(prefix)
351
+ assert prefix_re.startswith(start) and prefix_re.endswith(end)
352
+ prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
353
+ sep = os.sep
354
+ if os.sep == '\\':
355
+ sep = r'\\'
356
+ if _PYTHON_VERSION <= (3, 2):
357
+ pattern_re = '^' + base + sep.join((prefix_re,
358
+ '.*' + pattern_re))
359
+ else:
360
+ pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
361
+ pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
362
+ pattern_re, end)
363
+ else: # no prefix -- respect anchor flag
364
+ if anchor:
365
+ if _PYTHON_VERSION <= (3, 2):
366
+ pattern_re = '^' + base + pattern_re
367
+ else:
368
+ pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
369
+
370
+ return re.compile(pattern_re)
371
+
372
+ def _glob_to_re(self, pattern):
373
+ """Translate a shell-like glob pattern to a regular expression.
374
+
375
+ Return a string containing the regex. Differs from
376
+ 'fnmatch.translate()' in that '*' does not match "special characters"
377
+ (which are platform-specific).
378
+ """
379
+ pattern_re = fnmatch.translate(pattern)
380
+
381
+ # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
382
+ # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
383
+ # and by extension they shouldn't match such "special characters" under
384
+ # any OS. So change all non-escaped dots in the RE to match any
385
+ # character except the special characters (currently: just os.sep).
386
+ sep = os.sep
387
+ if os.sep == '\\':
388
+ # we're using a regex to manipulate a regex, so we need
389
+ # to escape the backslash twice
390
+ sep = r'\\\\'
391
+ escaped = r'\1[^%s]' % sep
392
+ pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
393
+ return pattern_re
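To make the difference between `_glob_to_re` and `fnmatch.translate` concrete, here is an illustrative comparison; this is a sketch against a private helper, with a POSIX path separator assumed:

```python
# fnmatch lets '*' cross '/', while _glob_to_re rewrites unescaped '.'
# in the translated regex so a wildcard cannot cross the separator.
import fnmatch
import re
from pip._vendor.distlib.manifest import Manifest

m = Manifest('/tmp')                        # base is irrelevant for this helper
plain = re.compile(fnmatch.translate('*.py'))
scoped = re.compile(m._glob_to_re('*.py'))

print(bool(plain.match('pkg/mod.py')))      # True  - '*' crossed the separator
print(bool(scoped.match('pkg/mod.py')))     # False - os.sep is excluded
print(bool(scoped.match('mod.py')))         # True
```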
venv/lib/python3.10/site-packages/pip/_vendor/distlib/markers.py ADDED
@@ -0,0 +1,152 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2012-2017 Vinay Sajip.
4
+ # Licensed to the Python Software Foundation under a contributor agreement.
5
+ # See LICENSE.txt and CONTRIBUTORS.txt.
6
+ #
7
+ """
8
+ Parser for the environment markers micro-language defined in PEP 508.
9
+ """
10
+
11
+ # Note: In PEP 345, the micro-language was Python compatible, so the ast
12
+ # module could be used to parse it. However, PEP 508 introduced operators such
13
+ # as ~= and === which aren't in Python, necessitating a different approach.
14
+
15
+ import os
16
+ import re
17
+ import sys
18
+ import platform
19
+
20
+ from .compat import string_types
21
+ from .util import in_venv, parse_marker
22
+ from .version import NormalizedVersion as NV
23
+
24
+ __all__ = ['interpret']
25
+
26
+ _VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")')
27
+
28
+ def _is_literal(o):
29
+ if not isinstance(o, string_types) or not o:
30
+ return False
31
+ return o[0] in '\'"'
32
+
33
+ def _get_versions(s):
34
+ result = []
35
+ for m in _VERSION_PATTERN.finditer(s):
36
+ result.append(NV(m.groups()[0]))
37
+ return set(result)
38
+
39
+ class Evaluator(object):
40
+ """
41
+ This class is used to evaluate marker expressions.
42
+ """
43
+
44
+ operations = {
45
+ '==': lambda x, y: x == y,
46
+ '===': lambda x, y: x == y,
47
+ '~=': lambda x, y: x == y or x > y,
48
+ '!=': lambda x, y: x != y,
49
+ '<': lambda x, y: x < y,
50
+ '<=': lambda x, y: x == y or x < y,
51
+ '>': lambda x, y: x > y,
52
+ '>=': lambda x, y: x == y or x > y,
53
+ 'and': lambda x, y: x and y,
54
+ 'or': lambda x, y: x or y,
55
+ 'in': lambda x, y: x in y,
56
+ 'not in': lambda x, y: x not in y,
57
+ }
58
+
59
+ def evaluate(self, expr, context):
60
+ """
61
+ Evaluate a marker expression returned by the :func:`parse_requirement`
62
+ function in the specified context.
63
+ """
64
+ if isinstance(expr, string_types):
65
+ if expr[0] in '\'"':
66
+ result = expr[1:-1]
67
+ else:
68
+ if expr not in context:
69
+ raise SyntaxError('unknown variable: %s' % expr)
70
+ result = context[expr]
71
+ else:
72
+ assert isinstance(expr, dict)
73
+ op = expr['op']
74
+ if op not in self.operations:
75
+ raise NotImplementedError('op not implemented: %s' % op)
76
+ elhs = expr['lhs']
77
+ erhs = expr['rhs']
78
+ if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
79
+ raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
80
+
81
+ lhs = self.evaluate(elhs, context)
82
+ rhs = self.evaluate(erhs, context)
83
+ if ((elhs == 'python_version' or erhs == 'python_version') and
84
+ op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
85
+ lhs = NV(lhs)
86
+ rhs = NV(rhs)
87
+ elif elhs == 'python_version' and op in ('in', 'not in'):
88
+ lhs = NV(lhs)
89
+ rhs = _get_versions(rhs)
90
+ result = self.operations[op](lhs, rhs)
91
+ return result
92
+
93
+ _DIGITS = re.compile(r'\d+\.\d+')
94
+
95
+ def default_context():
96
+ def format_full_version(info):
97
+ version = '%s.%s.%s' % (info.major, info.minor, info.micro)
98
+ kind = info.releaselevel
99
+ if kind != 'final':
100
+ version += kind[0] + str(info.serial)
101
+ return version
102
+
103
+ if hasattr(sys, 'implementation'):
104
+ implementation_version = format_full_version(sys.implementation.version)
105
+ implementation_name = sys.implementation.name
106
+ else:
107
+ implementation_version = '0'
108
+ implementation_name = ''
109
+
110
+ ppv = platform.python_version()
111
+ m = _DIGITS.match(ppv)
112
+ pv = m.group(0)
113
+ result = {
114
+ 'implementation_name': implementation_name,
115
+ 'implementation_version': implementation_version,
116
+ 'os_name': os.name,
117
+ 'platform_machine': platform.machine(),
118
+ 'platform_python_implementation': platform.python_implementation(),
119
+ 'platform_release': platform.release(),
120
+ 'platform_system': platform.system(),
121
+ 'platform_version': platform.version(),
122
+ 'platform_in_venv': str(in_venv()),
123
+ 'python_full_version': ppv,
124
+ 'python_version': pv,
125
+ 'sys_platform': sys.platform,
126
+ }
127
+ return result
128
+
129
+ DEFAULT_CONTEXT = default_context()
130
+ del default_context
131
+
132
+ evaluator = Evaluator()
133
+
134
+ def interpret(marker, execution_context=None):
135
+ """
136
+ Interpret a marker and return a result depending on environment.
137
+
138
+ :param marker: The marker to interpret.
139
+ :type marker: str
140
+ :param execution_context: The context used for name lookup.
141
+ :type execution_context: mapping
142
+ """
143
+ try:
144
+ expr, rest = parse_marker(marker)
145
+ except Exception as e:
146
+ raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
147
+ if rest and rest[0] != '#':
148
+ raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
149
+ context = dict(DEFAULT_CONTEXT)
150
+ if execution_context:
151
+ context.update(execution_context)
152
+ return evaluator.evaluate(expr, context)
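A short usage sketch for `interpret()`; the results shown are what a typical CPython 3.10 on Linux would produce against DEFAULT_CONTEXT:

```python
from pip._vendor.distlib.markers import interpret

print(interpret('python_version >= "3.6"'))  # True on Python 3.10
print(interpret('os_name == "nt"'))          # False outside Windows
print(interpret('python_version < "3"',      # execution_context overrides
                {'python_version': '2.7'}))  # True
```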
venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py ADDED
@@ -0,0 +1,1058 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2012 The Python Software Foundation.
4
+ # See LICENSE.txt and CONTRIBUTORS.txt.
5
+ #
6
+ """Implementation of the Metadata for Python packages PEPs.
7
+
8
+ Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and withdrawn 2.0).
9
+ """
10
+ from __future__ import unicode_literals
11
+
12
+ import codecs
13
+ from email import message_from_file
14
+ import json
15
+ import logging
16
+ import re
17
+
18
+
19
+ from . import DistlibException, __version__
20
+ from .compat import StringIO, string_types, text_type
21
+ from .markers import interpret
22
+ from .util import extract_by_key, get_extras
23
+ from .version import get_scheme, PEP440_VERSION_RE
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ class MetadataMissingError(DistlibException):
29
+ """A required metadata is missing"""
30
+
31
+
32
+ class MetadataConflictError(DistlibException):
33
+ """Attempt to read or write metadata fields that are conflictual."""
34
+
35
+
36
+ class MetadataUnrecognizedVersionError(DistlibException):
37
+ """Unknown metadata version number."""
38
+
39
+
40
+ class MetadataInvalidError(DistlibException):
41
+ """A metadata value is invalid"""
42
+
43
+ # public API of this module
44
+ __all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
45
+
46
+ # Encoding used for the PKG-INFO files
47
+ PKG_INFO_ENCODING = 'utf-8'
48
+
49
+ # preferred version. Hopefully will be changed
50
+ # to 1.2 once PEP 345 is supported everywhere
51
+ PKG_INFO_PREFERRED_VERSION = '1.1'
52
+
53
+ _LINE_PREFIX_1_2 = re.compile('\n \\|')
54
+ _LINE_PREFIX_PRE_1_2 = re.compile('\n ')
55
+ _241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
56
+ 'Summary', 'Description',
57
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
58
+ 'License')
59
+
60
+ _314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
61
+ 'Supported-Platform', 'Summary', 'Description',
62
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
63
+ 'License', 'Classifier', 'Download-URL', 'Obsoletes',
64
+ 'Provides', 'Requires')
65
+
66
+ _314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
67
+ 'Download-URL')
68
+
69
+ _345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
70
+ 'Supported-Platform', 'Summary', 'Description',
71
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
72
+ 'Maintainer', 'Maintainer-email', 'License',
73
+ 'Classifier', 'Download-URL', 'Obsoletes-Dist',
74
+ 'Project-URL', 'Provides-Dist', 'Requires-Dist',
75
+ 'Requires-Python', 'Requires-External')
76
+
77
+ _345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
78
+ 'Obsoletes-Dist', 'Requires-External', 'Maintainer',
79
+ 'Maintainer-email', 'Project-URL')
80
+
81
+ _426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
82
+ 'Supported-Platform', 'Summary', 'Description',
83
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
84
+ 'Maintainer', 'Maintainer-email', 'License',
85
+ 'Classifier', 'Download-URL', 'Obsoletes-Dist',
86
+ 'Project-URL', 'Provides-Dist', 'Requires-Dist',
87
+ 'Requires-Python', 'Requires-External', 'Private-Version',
88
+ 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
89
+ 'Provides-Extra')
90
+
91
+ _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
92
+ 'Setup-Requires-Dist', 'Extension')
93
+
94
+ # See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in
95
+ # the metadata. Include them in the tuple literal below to allow them
96
+ # (for now).
97
+ # Ditto for Obsoletes - see issue #140.
98
+ _566_FIELDS = _426_FIELDS + ('Description-Content-Type',
99
+ 'Requires', 'Provides', 'Obsoletes')
100
+
101
+ _566_MARKERS = ('Description-Content-Type',)
102
+
103
+ _ALL_FIELDS = set()
104
+ _ALL_FIELDS.update(_241_FIELDS)
105
+ _ALL_FIELDS.update(_314_FIELDS)
106
+ _ALL_FIELDS.update(_345_FIELDS)
107
+ _ALL_FIELDS.update(_426_FIELDS)
108
+ _ALL_FIELDS.update(_566_FIELDS)
109
+
110
+ EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
111
+
112
+
113
+ def _version2fieldlist(version):
114
+ if version == '1.0':
115
+ return _241_FIELDS
116
+ elif version == '1.1':
117
+ return _314_FIELDS
118
+ elif version == '1.2':
119
+ return _345_FIELDS
120
+ elif version in ('1.3', '2.1'):
121
+ # avoid adding field names if already there
122
+ return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS)
123
+ elif version == '2.0':
124
+ return _426_FIELDS
125
+ raise MetadataUnrecognizedVersionError(version)
126
+
127
+
128
+ def _best_version(fields):
129
+ """Detect the best version depending on the fields used."""
130
+ def _has_marker(keys, markers):
131
+ for marker in markers:
132
+ if marker in keys:
133
+ return True
134
+ return False
135
+
136
+ keys = []
137
+ for key, value in fields.items():
138
+ if value in ([], 'UNKNOWN', None):
139
+ continue
140
+ keys.append(key)
141
+
142
+ possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1']
143
+
144
+ # first, let's see if a field is not part of one of the versions
145
+ for key in keys:
146
+ if key not in _241_FIELDS and '1.0' in possible_versions:
147
+ possible_versions.remove('1.0')
148
+ logger.debug('Removed 1.0 due to %s', key)
149
+ if key not in _314_FIELDS and '1.1' in possible_versions:
150
+ possible_versions.remove('1.1')
151
+ logger.debug('Removed 1.1 due to %s', key)
152
+ if key not in _345_FIELDS and '1.2' in possible_versions:
153
+ possible_versions.remove('1.2')
154
+ logger.debug('Removed 1.2 due to %s', key)
155
+ if key not in _566_FIELDS and '1.3' in possible_versions:
156
+ possible_versions.remove('1.3')
157
+ logger.debug('Removed 1.3 due to %s', key)
158
+ if key not in _566_FIELDS and '2.1' in possible_versions:
159
+ if key != 'Description': # In 2.1, description allowed after headers
160
+ possible_versions.remove('2.1')
161
+ logger.debug('Removed 2.1 due to %s', key)
162
+ if key not in _426_FIELDS and '2.0' in possible_versions:
163
+ possible_versions.remove('2.0')
164
+ logger.debug('Removed 2.0 due to %s', key)
165
+
166
+ # possible_versions contains qualified versions
167
+ if len(possible_versions) == 1:
168
+ return possible_versions[0] # found !
169
+ elif len(possible_versions) == 0:
170
+ logger.debug('Out of options - unknown metadata set: %s', fields)
171
+ raise MetadataConflictError('Unknown metadata set')
172
+
173
+ # let's see if one unique marker is found
174
+ is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
175
+ is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
176
+ is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
177
+ is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
178
+ if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1:
179
+ raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields')
180
+
181
+ # we have the choice, 1.0, or 1.2, or 2.0
182
+ # - 1.0 has a broken Summary field but works with all tools
183
+ # - 1.1 is to avoid
184
+ # - 1.2 fixes Summary but has little adoption
185
+ # - 2.0 adds more features and is very new
186
+ if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0:
187
+ # we couldn't find any specific marker
188
+ if PKG_INFO_PREFERRED_VERSION in possible_versions:
189
+ return PKG_INFO_PREFERRED_VERSION
190
+ if is_1_1:
191
+ return '1.1'
192
+ if is_1_2:
193
+ return '1.2'
194
+ if is_2_1:
195
+ return '2.1'
196
+
197
+ return '2.0'
198
+
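A hedged sketch of `_best_version()` in action: the preferred version wins unless a field unique to a newer spec forces an upgrade. The field values are illustrative:

```python
from pip._vendor.distlib.metadata import _best_version

print(_best_version({'Name': 'demo', 'Version': '0.1'}))  # '1.1' (preferred)
print(_best_version({'Name': 'demo', 'Version': '0.1',
                     'Requires-Python': '>=3.6'}))        # '1.2' (PEP 345 marker)
```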
199
+ # This follows the rules about transforming keys as described in
200
+ # https://www.python.org/dev/peps/pep-0566/#id17
201
+ _ATTR2FIELD = {
202
+ name.lower().replace("-", "_"): name for name in _ALL_FIELDS
203
+ }
204
+ _FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}
205
+
206
+ _PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
207
+ _VERSIONS_FIELDS = ('Requires-Python',)
208
+ _VERSION_FIELDS = ('Version',)
209
+ _LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
210
+ 'Requires', 'Provides', 'Obsoletes-Dist',
211
+ 'Provides-Dist', 'Requires-Dist', 'Requires-External',
212
+ 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
213
+ 'Provides-Extra', 'Extension')
214
+ _LISTTUPLEFIELDS = ('Project-URL',)
215
+
216
+ _ELEMENTSFIELD = ('Keywords',)
217
+
218
+ _UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
219
+
220
+ _MISSING = object()
221
+
222
+ _FILESAFE = re.compile('[^A-Za-z0-9.]+')
223
+
224
+
225
+ def _get_name_and_version(name, version, for_filename=False):
226
+ """Return the distribution name with version.
227
+
228
+ If for_filename is true, return a filename-escaped form."""
229
+ if for_filename:
230
+ # For both name and version any runs of non-alphanumeric or '.'
231
+ # characters are replaced with a single '-'. Additionally any
232
+ # spaces in the version string become '.'
233
+ name = _FILESAFE.sub('-', name)
234
+ version = _FILESAFE.sub('-', version.replace(' ', '.'))
235
+ return '%s-%s' % (name, version)
236
+
237
+
238
+ class LegacyMetadata(object):
239
+ """The legacy metadata of a release.
240
+
241
+ Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can
242
+ instantiate the class with one of these arguments (or none):
243
+ - *path*, the path to a metadata file
244
+ - *fileobj* is a file-like object with metadata as content
245
+ - *mapping* is a dict-like object
246
+ - *scheme* is a version scheme name
247
+ """
248
+ # TODO document the mapping API and UNKNOWN default key
249
+
250
+ def __init__(self, path=None, fileobj=None, mapping=None,
251
+ scheme='default'):
252
+ if [path, fileobj, mapping].count(None) < 2:
253
+ raise TypeError('path, fileobj and mapping are exclusive')
254
+ self._fields = {}
255
+ self.requires_files = []
256
+ self._dependencies = None
257
+ self.scheme = scheme
258
+ if path is not None:
259
+ self.read(path)
260
+ elif fileobj is not None:
261
+ self.read_file(fileobj)
262
+ elif mapping is not None:
263
+ self.update(mapping)
264
+ self.set_metadata_version()
265
+
266
+ def set_metadata_version(self):
267
+ self._fields['Metadata-Version'] = _best_version(self._fields)
268
+
269
+ def _write_field(self, fileobj, name, value):
270
+ fileobj.write('%s: %s\n' % (name, value))
271
+
272
+ def __getitem__(self, name):
273
+ return self.get(name)
274
+
275
+ def __setitem__(self, name, value):
276
+ return self.set(name, value)
277
+
278
+ def __delitem__(self, name):
279
+ field_name = self._convert_name(name)
280
+ try:
281
+ del self._fields[field_name]
282
+ except KeyError:
283
+ raise KeyError(name)
284
+
285
+ def __contains__(self, name):
286
+ return (name in self._fields or
287
+ self._convert_name(name) in self._fields)
288
+
289
+ def _convert_name(self, name):
290
+ if name in _ALL_FIELDS:
291
+ return name
292
+ name = name.replace('-', '_').lower()
293
+ return _ATTR2FIELD.get(name, name)
294
+
295
+ def _default_value(self, name):
296
+ if name in _LISTFIELDS or name in _ELEMENTSFIELD:
297
+ return []
298
+ return 'UNKNOWN'
299
+
300
+ def _remove_line_prefix(self, value):
301
+ if self.metadata_version in ('1.0', '1.1'):
302
+ return _LINE_PREFIX_PRE_1_2.sub('\n', value)
303
+ else:
304
+ return _LINE_PREFIX_1_2.sub('\n', value)
305
+
306
+ def __getattr__(self, name):
307
+ if name in _ATTR2FIELD:
308
+ return self[name]
309
+ raise AttributeError(name)
310
+
311
+ #
312
+ # Public API
313
+ #
314
+
315
+ # dependencies = property(_get_dependencies, _set_dependencies)
316
+
317
+ def get_fullname(self, filesafe=False):
318
+ """Return the distribution name with version.
319
+
320
+ If filesafe is true, return a filename-escaped form."""
321
+ return _get_name_and_version(self['Name'], self['Version'], filesafe)
322
+
323
+ def is_field(self, name):
324
+ """return True if name is a valid metadata key"""
325
+ name = self._convert_name(name)
326
+ return name in _ALL_FIELDS
327
+
328
+ def is_multi_field(self, name):
329
+ name = self._convert_name(name)
330
+ return name in _LISTFIELDS
331
+
332
+ def read(self, filepath):
333
+ """Read the metadata values from a file path."""
334
+ fp = codecs.open(filepath, 'r', encoding='utf-8')
335
+ try:
336
+ self.read_file(fp)
337
+ finally:
338
+ fp.close()
339
+
340
+ def read_file(self, fileob):
341
+ """Read the metadata values from a file object."""
342
+ msg = message_from_file(fileob)
343
+ self._fields['Metadata-Version'] = msg['metadata-version']
344
+
345
+ # When reading, get all the fields we can
346
+ for field in _ALL_FIELDS:
347
+ if field not in msg:
348
+ continue
349
+ if field in _LISTFIELDS:
350
+ # we can have multiple lines
351
+ values = msg.get_all(field)
352
+ if field in _LISTTUPLEFIELDS and values is not None:
353
+ values = [tuple(value.split(',')) for value in values]
354
+ self.set(field, values)
355
+ else:
356
+ # single line
357
+ value = msg[field]
358
+ if value is not None and value != 'UNKNOWN':
359
+ self.set(field, value)
360
+
361
+ # PEP 566 specifies that the body be used for the description, if
362
+ # available
363
+ body = msg.get_payload()
364
+ self["Description"] = body if body else self["Description"]
365
+ # logger.debug('Attempting to set metadata for %s', self)
366
+ # self.set_metadata_version()
367
+
368
+ def write(self, filepath, skip_unknown=False):
369
+ """Write the metadata fields to filepath."""
370
+ fp = codecs.open(filepath, 'w', encoding='utf-8')
371
+ try:
372
+ self.write_file(fp, skip_unknown)
373
+ finally:
374
+ fp.close()
375
+
376
+ def write_file(self, fileobject, skip_unknown=False):
377
+ """Write the PKG-INFO format data to a file object."""
378
+ self.set_metadata_version()
379
+
380
+ for field in _version2fieldlist(self['Metadata-Version']):
381
+ values = self.get(field)
382
+ if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
383
+ continue
384
+ if field in _ELEMENTSFIELD:
385
+ self._write_field(fileobject, field, ','.join(values))
386
+ continue
387
+ if field not in _LISTFIELDS:
388
+ if field == 'Description':
389
+ if self.metadata_version in ('1.0', '1.1'):
390
+ values = values.replace('\n', '\n ')
391
+ else:
392
+ values = values.replace('\n', '\n |')
393
+ values = [values]
394
+
395
+ if field in _LISTTUPLEFIELDS:
396
+ values = [','.join(value) for value in values]
397
+
398
+ for value in values:
399
+ self._write_field(fileobject, field, value)
400
+
401
+ def update(self, other=None, **kwargs):
402
+ """Set metadata values from the given iterable `other` and kwargs.
403
+
404
+ Behavior is like `dict.update`: If `other` has a ``keys`` method,
405
+ they are looped over and ``self[key]`` is assigned ``other[key]``.
406
+ Else, ``other`` is an iterable of ``(key, value)`` iterables.
407
+
408
+ Keys that don't match a metadata field or that have an empty value are
409
+ dropped.
410
+ """
411
+ def _set(key, value):
412
+ if key in _ATTR2FIELD and value:
413
+ self.set(self._convert_name(key), value)
414
+
415
+ if not other:
416
+ # other is None or empty container
417
+ pass
418
+ elif hasattr(other, 'keys'):
419
+ for k in other.keys():
420
+ _set(k, other[k])
421
+ else:
422
+ for k, v in other:
423
+ _set(k, v)
424
+
425
+ if kwargs:
426
+ for k, v in kwargs.items():
427
+ _set(k, v)
428
+
429
+ def set(self, name, value):
430
+ """Control then set a metadata field."""
431
+ name = self._convert_name(name)
432
+
433
+ if ((name in _ELEMENTSFIELD or name == 'Platform') and
434
+ not isinstance(value, (list, tuple))):
435
+ if isinstance(value, string_types):
436
+ value = [v.strip() for v in value.split(',')]
437
+ else:
438
+ value = []
439
+ elif (name in _LISTFIELDS and
440
+ not isinstance(value, (list, tuple))):
441
+ if isinstance(value, string_types):
442
+ value = [value]
443
+ else:
444
+ value = []
445
+
446
+ if logger.isEnabledFor(logging.WARNING):
447
+ project_name = self['Name']
448
+
449
+ scheme = get_scheme(self.scheme)
450
+ if name in _PREDICATE_FIELDS and value is not None:
451
+ for v in value:
452
+ # check that the values are valid
453
+ if not scheme.is_valid_matcher(v.split(';')[0]):
454
+ logger.warning(
455
+ "'%s': '%s' is not valid (field '%s')",
456
+ project_name, v, name)
457
+ # FIXME this rejects UNKNOWN, is that right?
458
+ elif name in _VERSIONS_FIELDS and value is not None:
459
+ if not scheme.is_valid_constraint_list(value):
460
+ logger.warning("'%s': '%s' is not a valid version (field '%s')",
461
+ project_name, value, name)
462
+ elif name in _VERSION_FIELDS and value is not None:
463
+ if not scheme.is_valid_version(value):
464
+ logger.warning("'%s': '%s' is not a valid version (field '%s')",
465
+ project_name, value, name)
466
+
467
+ if name in _UNICODEFIELDS:
468
+ if name == 'Description':
469
+ value = self._remove_line_prefix(value)
470
+
471
+ self._fields[name] = value
472
+
473
+ def get(self, name, default=_MISSING):
474
+ """Get a metadata field."""
475
+ name = self._convert_name(name)
476
+ if name not in self._fields:
477
+ if default is _MISSING:
478
+ default = self._default_value(name)
479
+ return default
480
+ if name in _UNICODEFIELDS:
481
+ value = self._fields[name]
482
+ return value
483
+ elif name in _LISTFIELDS:
484
+ value = self._fields[name]
485
+ if value is None:
486
+ return []
487
+ res = []
488
+ for val in value:
489
+ if name not in _LISTTUPLEFIELDS:
490
+ res.append(val)
491
+ else:
492
+ # That's for Project-URL
493
+ res.append((val[0], val[1]))
494
+ return res
495
+
496
+ elif name in _ELEMENTSFIELD:
497
+ value = self._fields[name]
498
+ if isinstance(value, string_types):
499
+ return value.split(',')
500
+ return self._fields[name]
501
+
502
+ def check(self, strict=False):
503
+ """Check if the metadata is compliant. If strict is True then raise if
504
+ no Name or Version are provided"""
505
+ self.set_metadata_version()
506
+
507
+ # XXX should check the versions (if the file was loaded)
508
+ missing, warnings = [], []
509
+
510
+ for attr in ('Name', 'Version'): # required by PEP 345
511
+ if attr not in self:
512
+ missing.append(attr)
513
+
514
+ if strict and missing != []:
515
+ msg = 'missing required metadata: %s' % ', '.join(missing)
516
+ raise MetadataMissingError(msg)
517
+
518
+ for attr in ('Home-page', 'Author'):
519
+ if attr not in self:
520
+ missing.append(attr)
521
+
522
+ # checking metadata 1.2 (XXX needs to check 1.1, 1.0)
523
+ if self['Metadata-Version'] != '1.2':
524
+ return missing, warnings
525
+
526
+ scheme = get_scheme(self.scheme)
527
+
528
+ def are_valid_constraints(value):
529
+ for v in value:
530
+ if not scheme.is_valid_matcher(v.split(';')[0]):
531
+ return False
532
+ return True
533
+
534
+ for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
535
+ (_VERSIONS_FIELDS,
536
+ scheme.is_valid_constraint_list),
537
+ (_VERSION_FIELDS,
538
+ scheme.is_valid_version)):
539
+ for field in fields:
540
+ value = self.get(field, None)
541
+ if value is not None and not controller(value):
542
+ warnings.append("Wrong value for '%s': %s" % (field, value))
543
+
544
+ return missing, warnings
545
+
546
+ def todict(self, skip_missing=False):
547
+ """Return fields as a dict.
548
+
549
+ Field names will be converted to use the underscore-lowercase style
550
+ instead of hyphen-mixed case (i.e. home_page instead of Home-page).
551
+ This is as per https://www.python.org/dev/peps/pep-0566/#id17.
552
+ """
553
+ self.set_metadata_version()
554
+
555
+ fields = _version2fieldlist(self['Metadata-Version'])
556
+
557
+ data = {}
558
+
559
+ for field_name in fields:
560
+ if not skip_missing or field_name in self._fields:
561
+ key = _FIELD2ATTR[field_name]
562
+ if key != 'project_url':
563
+ data[key] = self[field_name]
564
+ else:
565
+ data[key] = [','.join(u) for u in self[field_name]]
566
+
567
+ return data
568
+
569
+ def add_requirements(self, requirements):
570
+ if self['Metadata-Version'] == '1.1':
571
+ # we can't have 1.1 metadata *and* Setuptools requires
572
+ for field in ('Obsoletes', 'Requires', 'Provides'):
573
+ if field in self:
574
+ del self[field]
575
+ self['Requires-Dist'] += requirements
576
+
577
+ # Mapping API
578
+ # TODO could add iter* variants
579
+
580
+ def keys(self):
581
+ return list(_version2fieldlist(self['Metadata-Version']))
582
+
583
+ def __iter__(self):
584
+ for key in self.keys():
585
+ yield key
586
+
587
+ def values(self):
588
+ return [self[key] for key in self.keys()]
589
+
590
+ def items(self):
591
+ return [(key, self[key]) for key in self.keys()]
592
+
593
+ def __repr__(self):
594
+ return '<%s %s %s>' % (self.__class__.__name__, self.name,
595
+ self.version)
596
+
597
+
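A minimal sketch of `LegacyMetadata`: populate it from a mapping and emit key-value PKG-INFO text to an in-memory stream. The field values are illustrative:

```python
import io
from pip._vendor.distlib.metadata import LegacyMetadata

md = LegacyMetadata(mapping={'name': 'demo', 'version': '0.1',
                             'summary': 'A demonstration package'})
buf = io.StringIO()
md.write_file(buf, skip_unknown=True)  # omit UNKNOWN/empty fields
print(buf.getvalue())  # Metadata-Version: 1.1, Name: demo, ...
```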
598
+ METADATA_FILENAME = 'pydist.json'
599
+ WHEEL_METADATA_FILENAME = 'metadata.json'
600
+ LEGACY_METADATA_FILENAME = 'METADATA'
601
+
602
+
603
+ class Metadata(object):
604
+ """
605
+ The metadata of a release. This implementation uses 2.0 (JSON)
606
+ metadata where possible. If not possible, it wraps a LegacyMetadata
607
+ instance which handles the key-value metadata format.
608
+ """
609
+
610
+ METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
611
+
612
+ NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
613
+
614
+ VERSION_MATCHER = PEP440_VERSION_RE
615
+
616
+ SUMMARY_MATCHER = re.compile('.{1,2047}')
617
+
618
+ METADATA_VERSION = '2.0'
619
+
620
+ GENERATOR = 'distlib (%s)' % __version__
621
+
622
+ MANDATORY_KEYS = {
623
+ 'name': (),
624
+ 'version': (),
625
+ 'summary': ('legacy',),
626
+ }
627
+
628
+ INDEX_KEYS = ('name version license summary description author '
629
+ 'author_email keywords platform home_page classifiers '
630
+ 'download_url')
631
+
632
+ DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
633
+ 'dev_requires provides meta_requires obsoleted_by '
634
+ 'supports_environments')
635
+
636
+ SYNTAX_VALIDATORS = {
637
+ 'metadata_version': (METADATA_VERSION_MATCHER, ()),
638
+ 'name': (NAME_MATCHER, ('legacy',)),
639
+ 'version': (VERSION_MATCHER, ('legacy',)),
640
+ 'summary': (SUMMARY_MATCHER, ('legacy',)),
641
+ }
642
+
643
+ __slots__ = ('_legacy', '_data', 'scheme')
644
+
645
+ def __init__(self, path=None, fileobj=None, mapping=None,
646
+ scheme='default'):
647
+ if [path, fileobj, mapping].count(None) < 2:
648
+ raise TypeError('path, fileobj and mapping are exclusive')
649
+ self._legacy = None
650
+ self._data = None
651
+ self.scheme = scheme
652
+ #import pdb; pdb.set_trace()
653
+ if mapping is not None:
654
+ try:
655
+ self._validate_mapping(mapping, scheme)
656
+ self._data = mapping
657
+ except MetadataUnrecognizedVersionError:
658
+ self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
659
+ self.validate()
660
+ else:
661
+ data = None
662
+ if path:
663
+ with open(path, 'rb') as f:
664
+ data = f.read()
665
+ elif fileobj:
666
+ data = fileobj.read()
667
+ if data is None:
668
+ # Initialised with no args - to be added
669
+ self._data = {
670
+ 'metadata_version': self.METADATA_VERSION,
671
+ 'generator': self.GENERATOR,
672
+ }
673
+ else:
674
+ if not isinstance(data, text_type):
675
+ data = data.decode('utf-8')
676
+ try:
677
+ self._data = json.loads(data)
678
+ self._validate_mapping(self._data, scheme)
679
+ except ValueError:
680
+ # Note: MetadataUnrecognizedVersionError does not
681
+ # inherit from ValueError (it's a DistlibException,
682
+ # which should not inherit from ValueError).
683
+ # The ValueError comes from the json.load - if that
684
+ # succeeds and we get a validation error, we want
685
+ # that to propagate
686
+ self._legacy = LegacyMetadata(fileobj=StringIO(data),
687
+ scheme=scheme)
688
+ self.validate()
689
+
690
+ common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
691
+
692
+ none_list = (None, list)
693
+ none_dict = (None, dict)
694
+
695
+ mapped_keys = {
696
+ 'run_requires': ('Requires-Dist', list),
697
+ 'build_requires': ('Setup-Requires-Dist', list),
698
+ 'dev_requires': none_list,
699
+ 'test_requires': none_list,
700
+ 'meta_requires': none_list,
701
+ 'extras': ('Provides-Extra', list),
702
+ 'modules': none_list,
703
+ 'namespaces': none_list,
704
+ 'exports': none_dict,
705
+ 'commands': none_dict,
706
+ 'classifiers': ('Classifier', list),
707
+ 'source_url': ('Download-URL', None),
708
+ 'metadata_version': ('Metadata-Version', None),
709
+ }
710
+
711
+ del none_list, none_dict
712
+
713
+ def __getattribute__(self, key):
714
+ common = object.__getattribute__(self, 'common_keys')
715
+ mapped = object.__getattribute__(self, 'mapped_keys')
716
+ if key in mapped:
717
+ lk, maker = mapped[key]
718
+ if self._legacy:
719
+ if lk is None:
720
+ result = None if maker is None else maker()
721
+ else:
722
+ result = self._legacy.get(lk)
723
+ else:
724
+ value = None if maker is None else maker()
725
+ if key not in ('commands', 'exports', 'modules', 'namespaces',
726
+ 'classifiers'):
727
+ result = self._data.get(key, value)
728
+ else:
729
+ # special cases for PEP 459
730
+ sentinel = object()
731
+ result = sentinel
732
+ d = self._data.get('extensions')
733
+ if d:
734
+ if key == 'commands':
735
+ result = d.get('python.commands', value)
736
+ elif key == 'classifiers':
737
+ d = d.get('python.details')
738
+ if d:
739
+ result = d.get(key, value)
740
+ else:
741
+ d = d.get('python.exports')
742
+ if not d:
743
+ d = self._data.get('python.exports')
744
+ if d:
745
+ result = d.get(key, value)
746
+ if result is sentinel:
747
+ result = value
748
+ elif key not in common:
749
+ result = object.__getattribute__(self, key)
750
+ elif self._legacy:
751
+ result = self._legacy.get(key)
752
+ else:
753
+ result = self._data.get(key)
754
+ return result
755
+
756
+ def _validate_value(self, key, value, scheme=None):
757
+ if key in self.SYNTAX_VALIDATORS:
758
+ pattern, exclusions = self.SYNTAX_VALIDATORS[key]
759
+ if (scheme or self.scheme) not in exclusions:
760
+ m = pattern.match(value)
761
+ if not m:
762
+ raise MetadataInvalidError("'%s' is an invalid value for "
763
+ "the '%s' property" % (value,
764
+ key))
765
+
766
+ def __setattr__(self, key, value):
767
+ self._validate_value(key, value)
768
+ common = object.__getattribute__(self, 'common_keys')
769
+ mapped = object.__getattribute__(self, 'mapped_keys')
770
+ if key in mapped:
771
+ lk, _ = mapped[key]
772
+ if self._legacy:
773
+ if lk is None:
774
+ raise NotImplementedError
775
+ self._legacy[lk] = value
776
+ elif key not in ('commands', 'exports', 'modules', 'namespaces',
777
+ 'classifiers'):
778
+ self._data[key] = value
779
+ else:
780
+ # special cases for PEP 459
781
+ d = self._data.setdefault('extensions', {})
782
+ if key == 'commands':
783
+ d['python.commands'] = value
784
+ elif key == 'classifiers':
785
+ d = d.setdefault('python.details', {})
786
+ d[key] = value
787
+ else:
788
+ d = d.setdefault('python.exports', {})
789
+ d[key] = value
790
+ elif key not in common:
791
+ object.__setattr__(self, key, value)
792
+ else:
793
+ if key == 'keywords':
794
+ if isinstance(value, string_types):
795
+ value = value.strip()
796
+ if value:
797
+ value = value.split()
798
+ else:
799
+ value = []
800
+ if self._legacy:
801
+ self._legacy[key] = value
802
+ else:
803
+ self._data[key] = value
804
+
805
+ @property
806
+ def name_and_version(self):
807
+ return _get_name_and_version(self.name, self.version, True)
808
+
809
+ @property
810
+ def provides(self):
811
+ if self._legacy:
812
+ result = self._legacy['Provides-Dist']
813
+ else:
814
+ result = self._data.setdefault('provides', [])
815
+ s = '%s (%s)' % (self.name, self.version)
816
+ if s not in result:
817
+ result.append(s)
818
+ return result
819
+
820
+ @provides.setter
821
+ def provides(self, value):
822
+ if self._legacy:
823
+ self._legacy['Provides-Dist'] = value
824
+ else:
825
+ self._data['provides'] = value
826
+
827
+ def get_requirements(self, reqts, extras=None, env=None):
828
+ """
829
+ Base method to get dependencies, given a set of extras
830
+ to satisfy and an optional environment context.
831
+ :param reqts: A list of sometimes-wanted dependencies,
832
+ perhaps dependent on extras and environment.
833
+ :param extras: A list of optional components being requested.
834
+ :param env: An optional environment for marker evaluation.
835
+ """
836
+ if self._legacy:
837
+ result = reqts
838
+ else:
839
+ result = []
840
+ extras = get_extras(extras or [], self.extras)
841
+ for d in reqts:
842
+ if 'extra' not in d and 'environment' not in d:
843
+ # unconditional
844
+ include = True
845
+ else:
846
+ if 'extra' not in d:
847
+ # Not extra-dependent - only environment-dependent
848
+ include = True
849
+ else:
850
+ include = d.get('extra') in extras
851
+ if include:
852
+ # Not excluded because of extras, check environment
853
+ marker = d.get('environment')
854
+ if marker:
855
+ include = interpret(marker, env)
856
+ if include:
857
+ result.extend(d['requires'])
858
+ for key in ('build', 'dev', 'test'):
859
+ e = ':%s:' % key
860
+ if e in extras:
861
+ extras.remove(e)
862
+ # A recursive call, but it should terminate since 'test'
863
+ # has been removed from the extras
864
+ reqts = self._data.get('%s_requires' % key, [])
865
+ result.extend(self.get_requirements(reqts, extras=extras,
866
+ env=env))
867
+ return result
868
+
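A hedged sketch of the entry shape `get_requirements()` consumes for 2.0-style metadata: each entry may be gated on an extra, an environment marker, or neither. The requirement strings are illustrative:

```python
from pip._vendor.distlib.metadata import Metadata

reqts = [
    {'requires': ['requests (>=2.0)']},            # unconditional
    {'extra': 'test', 'requires': ['pytest']},     # only for the [test] extra
    {'environment': 'sys_platform == "win32"',     # marker-gated
     'requires': ['pywin32']},
]
md = Metadata()                    # empty 2.0-style metadata
print(md.get_requirements(reqts))  # unconditional entry only (plus pywin32
                                   # when actually run on Windows)
```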
869
+ @property
870
+ def dictionary(self):
871
+ if self._legacy:
872
+ return self._from_legacy()
873
+ return self._data
874
+
875
+ @property
876
+ def dependencies(self):
877
+ if self._legacy:
878
+ raise NotImplementedError
879
+ else:
880
+ return extract_by_key(self._data, self.DEPENDENCY_KEYS)
881
+
882
+ @dependencies.setter
883
+ def dependencies(self, value):
884
+ if self._legacy:
885
+ raise NotImplementedError
886
+ else:
887
+ self._data.update(value)
888
+
889
+ def _validate_mapping(self, mapping, scheme):
890
+ if mapping.get('metadata_version') != self.METADATA_VERSION:
891
+ raise MetadataUnrecognizedVersionError()
892
+ missing = []
893
+ for key, exclusions in self.MANDATORY_KEYS.items():
894
+ if key not in mapping:
895
+ if scheme not in exclusions:
896
+ missing.append(key)
897
+ if missing:
898
+ msg = 'Missing metadata items: %s' % ', '.join(missing)
899
+ raise MetadataMissingError(msg)
900
+ for k, v in mapping.items():
901
+ self._validate_value(k, v, scheme)
902
+
903
+ def validate(self):
904
+ if self._legacy:
905
+ missing, warnings = self._legacy.check(True)
906
+ if missing or warnings:
907
+ logger.warning('Metadata: missing: %s, warnings: %s',
908
+ missing, warnings)
909
+ else:
910
+ self._validate_mapping(self._data, self.scheme)
911
+
912
+ def todict(self):
913
+ if self._legacy:
914
+ return self._legacy.todict(True)
915
+ else:
916
+ result = extract_by_key(self._data, self.INDEX_KEYS)
917
+ return result
918
+
919
+ def _from_legacy(self):
920
+ assert self._legacy and not self._data
921
+ result = {
922
+ 'metadata_version': self.METADATA_VERSION,
923
+ 'generator': self.GENERATOR,
924
+ }
925
+ lmd = self._legacy.todict(True) # skip missing ones
926
+ for k in ('name', 'version', 'license', 'summary', 'description',
927
+ 'classifier'):
928
+ if k in lmd:
929
+ if k == 'classifier':
930
+ nk = 'classifiers'
931
+ else:
932
+ nk = k
933
+ result[nk] = lmd[k]
934
+ kw = lmd.get('Keywords', [])
935
+ if kw == ['']:
936
+ kw = []
937
+ result['keywords'] = kw
938
+ keys = (('requires_dist', 'run_requires'),
939
+ ('setup_requires_dist', 'build_requires'))
940
+ for ok, nk in keys:
941
+ if ok in lmd and lmd[ok]:
942
+ result[nk] = [{'requires': lmd[ok]}]
943
+ result['provides'] = self.provides
944
+ author = {}
945
+ maintainer = {}
946
+ return result
947
+
948
+ LEGACY_MAPPING = {
949
+ 'name': 'Name',
950
+ 'version': 'Version',
951
+ ('extensions', 'python.details', 'license'): 'License',
952
+ 'summary': 'Summary',
953
+ 'description': 'Description',
954
+ ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page',
955
+ ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author',
956
+ ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email',
957
+ 'source_url': 'Download-URL',
958
+ ('extensions', 'python.details', 'classifiers'): 'Classifier',
959
+ }
960
+
961
+ def _to_legacy(self):
962
+ def process_entries(entries):
963
+ reqts = set()
964
+ for e in entries:
965
+ extra = e.get('extra')
966
+ env = e.get('environment')
967
+ rlist = e['requires']
968
+ for r in rlist:
969
+ if not env and not extra:
970
+ reqts.add(r)
971
+ else:
972
+ marker = ''
973
+ if extra:
974
+ marker = 'extra == "%s"' % extra
975
+ if env:
976
+ if marker:
977
+ marker = '(%s) and %s' % (env, marker)
978
+ else:
979
+ marker = env
980
+ reqts.add(';'.join((r, marker)))
981
+ return reqts
982
+
983
+ assert self._data and not self._legacy
984
+ result = LegacyMetadata()
985
+ nmd = self._data
986
+ # import pdb; pdb.set_trace()
987
+ for nk, ok in self.LEGACY_MAPPING.items():
988
+ if not isinstance(nk, tuple):
989
+ if nk in nmd:
990
+ result[ok] = nmd[nk]
991
+ else:
992
+ d = nmd
993
+ found = True
994
+ for k in nk:
995
+ try:
996
+ d = d[k]
997
+ except (KeyError, IndexError):
998
+ found = False
999
+ break
1000
+ if found:
1001
+ result[ok] = d
1002
+ r1 = process_entries(self.run_requires + self.meta_requires)
1003
+ r2 = process_entries(self.build_requires + self.dev_requires)
1004
+ if self.extras:
1005
+ result['Provides-Extra'] = sorted(self.extras)
1006
+ result['Requires-Dist'] = sorted(r1)
1007
+ result['Setup-Requires-Dist'] = sorted(r2)
1008
+ # TODO: any other fields wanted
1009
+ return result
1010
+
1011
+ def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
1012
+ if [path, fileobj].count(None) != 1:
1013
+ raise ValueError('Exactly one of path and fileobj is needed')
1014
+ self.validate()
1015
+ if legacy:
1016
+ if self._legacy:
1017
+ legacy_md = self._legacy
1018
+ else:
1019
+ legacy_md = self._to_legacy()
1020
+ if path:
1021
+ legacy_md.write(path, skip_unknown=skip_unknown)
1022
+ else:
1023
+ legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
1024
+ else:
1025
+ if self._legacy:
1026
+ d = self._from_legacy()
1027
+ else:
1028
+ d = self._data
1029
+ if fileobj:
1030
+ json.dump(d, fileobj, ensure_ascii=True, indent=2,
1031
+ sort_keys=True)
1032
+ else:
1033
+ with codecs.open(path, 'w', 'utf-8') as f:
1034
+ json.dump(d, f, ensure_ascii=True, indent=2,
1035
+ sort_keys=True)
1036
+
1037
+ def add_requirements(self, requirements):
1038
+ if self._legacy:
1039
+ self._legacy.add_requirements(requirements)
1040
+ else:
1041
+ run_requires = self._data.setdefault('run_requires', [])
1042
+ always = None
1043
+ for entry in run_requires:
1044
+ if 'environment' not in entry and 'extra' not in entry:
1045
+ always = entry
1046
+ break
1047
+ if always is None:
1048
+ always = { 'requires': requirements }
1049
+ run_requires.insert(0, always)
1050
+ else:
1051
+ rset = set(always['requires']) | set(requirements)
1052
+ always['requires'] = sorted(rset)
1053
+
1054
+ def __repr__(self):
1055
+ name = self.name or '(no name)'
1056
+ version = self.version or 'no version'
1057
+ return '<%s %s %s (%s)>' % (self.__class__.__name__,
1058
+ self.metadata_version, name, version)
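An end-to-end sketch of the `Metadata` class above: attribute writes are checked against SYNTAX_VALIDATORS, and `write()` emits pydist.json-style JSON. The values used are illustrative:

```python
import io
from pip._vendor.distlib.metadata import Metadata

md = Metadata()
md.name = 'demo'          # checked against NAME_MATCHER
md.version = '0.1'        # checked against the PEP 440 regex
md.summary = 'A demonstration package'
md.add_requirements(['requests (>=2.0)'])

buf = io.StringIO()
md.write(fileobj=buf)
print(buf.getvalue())     # JSON with name, version, summary, run_requires
```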
venv/lib/python3.10/site-packages/pip/_vendor/distlib/resources.py ADDED
@@ -0,0 +1,358 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2013-2017 Vinay Sajip.
4
+ # Licensed to the Python Software Foundation under a contributor agreement.
5
+ # See LICENSE.txt and CONTRIBUTORS.txt.
6
+ #
7
+ from __future__ import unicode_literals
8
+
9
+ import bisect
10
+ import io
11
+ import logging
12
+ import os
13
+ import pkgutil
14
+ import sys
15
+ import types
16
+ import zipimport
17
+
18
+ from . import DistlibException
19
+ from .util import cached_property, get_cache_base, Cache
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ cache = None # created when needed
25
+
26
+
27
+ class ResourceCache(Cache):
28
+ def __init__(self, base=None):
29
+ if base is None:
30
+ # Use native string to avoid issues on 2.x: see Python #20140.
31
+ base = os.path.join(get_cache_base(), str('resource-cache'))
32
+ super(ResourceCache, self).__init__(base)
33
+
34
+ def is_stale(self, resource, path):
35
+ """
36
+ Is the cache stale for the given resource?
37
+
38
+ :param resource: The :class:`Resource` being cached.
39
+ :param path: The path of the resource in the cache.
40
+ :return: True if the cache is stale.
41
+ """
42
+ # Cache invalidation is a hard problem :-)
43
+ return True
44
+
45
+ def get(self, resource):
46
+ """
47
+ Get a resource into the cache,
48
+
49
+ :param resource: A :class:`Resource` instance.
50
+ :return: The pathname of the resource in the cache.
51
+ """
52
+ prefix, path = resource.finder.get_cache_info(resource)
53
+ if prefix is None:
54
+ result = path
55
+ else:
56
+ result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
57
+ dirname = os.path.dirname(result)
58
+ if not os.path.isdir(dirname):
59
+ os.makedirs(dirname)
60
+ if not os.path.exists(result):
61
+ stale = True
62
+ else:
63
+ stale = self.is_stale(resource, path)
64
+ if stale:
65
+ # write the bytes of the resource to the cache location
66
+ with open(result, 'wb') as f:
67
+ f.write(resource.bytes)
68
+ return result
69
+
70
+
71
+ class ResourceBase(object):
72
+ def __init__(self, finder, name):
73
+ self.finder = finder
74
+ self.name = name
75
+
76
+
77
+ class Resource(ResourceBase):
78
+ """
79
+ A class representing an in-package resource, such as a data file. This is
80
+ not normally instantiated by user code, but rather by a
81
+ :class:`ResourceFinder` which manages the resource.
82
+ """
83
+ is_container = False # Backwards compatibility
84
+
85
+ def as_stream(self):
86
+ """
87
+ Get the resource as a stream.
88
+
89
+ This is not a property to make it obvious that it returns a new stream
90
+ each time.
91
+ """
92
+ return self.finder.get_stream(self)
93
+
94
+ @cached_property
95
+ def file_path(self):
96
+ global cache
97
+ if cache is None:
98
+ cache = ResourceCache()
99
+ return cache.get(self)
100
+
101
+ @cached_property
102
+ def bytes(self):
103
+ return self.finder.get_bytes(self)
104
+
105
+ @cached_property
106
+ def size(self):
107
+ return self.finder.get_size(self)
108
+
109
+
110
+ class ResourceContainer(ResourceBase):
111
+ is_container = True # Backwards compatibility
112
+
113
+ @cached_property
114
+ def resources(self):
115
+ return self.finder.get_resources(self)
116
+
117
+
118
+ class ResourceFinder(object):
119
+ """
120
+ Resource finder for file system resources.
121
+ """
122
+
123
+ if sys.platform.startswith('java'):
124
+ skipped_extensions = ('.pyc', '.pyo', '.class')
125
+ else:
126
+ skipped_extensions = ('.pyc', '.pyo')
127
+
128
+ def __init__(self, module):
129
+ self.module = module
130
+ self.loader = getattr(module, '__loader__', None)
131
+ self.base = os.path.dirname(getattr(module, '__file__', ''))
132
+
133
+ def _adjust_path(self, path):
134
+ return os.path.realpath(path)
135
+
136
+ def _make_path(self, resource_name):
137
+ # Issue #50: need to preserve type of path on Python 2.x
138
+ # like os.path._get_sep
139
+ if isinstance(resource_name, bytes): # should only happen on 2.x
140
+ sep = b'/'
141
+ else:
142
+ sep = '/'
143
+ parts = resource_name.split(sep)
144
+ parts.insert(0, self.base)
145
+ result = os.path.join(*parts)
146
+ return self._adjust_path(result)
147
+
148
+ def _find(self, path):
149
+ return os.path.exists(path)
150
+
151
+ def get_cache_info(self, resource):
152
+ return None, resource.path
153
+
154
+ def find(self, resource_name):
155
+ path = self._make_path(resource_name)
156
+ if not self._find(path):
157
+ result = None
158
+ else:
159
+ if self._is_directory(path):
160
+ result = ResourceContainer(self, resource_name)
161
+ else:
162
+ result = Resource(self, resource_name)
163
+ result.path = path
164
+ return result
165
+
166
+ def get_stream(self, resource):
167
+ return open(resource.path, 'rb')
168
+
169
+ def get_bytes(self, resource):
170
+ with open(resource.path, 'rb') as f:
171
+ return f.read()
172
+
173
+ def get_size(self, resource):
174
+ return os.path.getsize(resource.path)
175
+
176
+ def get_resources(self, resource):
177
+ def allowed(f):
178
+ return (f != '__pycache__' and not
179
+ f.endswith(self.skipped_extensions))
180
+ return set([f for f in os.listdir(resource.path) if allowed(f)])
181
+
182
+ def is_container(self, resource):
183
+ return self._is_directory(resource.path)
184
+
185
+ _is_directory = staticmethod(os.path.isdir)
186
+
187
+ def iterator(self, resource_name):
188
+ resource = self.find(resource_name)
189
+ if resource is not None:
190
+ todo = [resource]
191
+ while todo:
192
+ resource = todo.pop(0)
193
+ yield resource
194
+ if resource.is_container:
195
+ rname = resource.name
196
+ for name in resource.resources:
197
+ if not rname:
198
+ new_name = name
199
+ else:
200
+ new_name = '/'.join([rname, name])
201
+ child = self.find(new_name)
202
+ if child.is_container:
203
+ todo.append(child)
204
+ else:
205
+ yield child
206
+
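+ # A usage sketch for the iterator above (illustrative, not part of
+ # distlib; 'mypackage' is a hypothetical package name). It walks every
+ # resource under a package, containers first:
+ #
+ # rf = finder('mypackage')  # 'finder' is defined later in this module
+ # for res in rf.iterator(''):
+ #     kind = 'dir ' if res.is_container else 'file'
+ #     print(kind, res.name)
+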
207
+
208
+ class ZipResourceFinder(ResourceFinder):
209
+ """
210
+ Resource finder for resources in .zip files.
211
+ """
212
+ def __init__(self, module):
213
+ super(ZipResourceFinder, self).__init__(module)
214
+ archive = self.loader.archive
215
+ self.prefix_len = 1 + len(archive)
216
+ # PyPy doesn't have a _files attr on zipimporter, and you can't set one
217
+ if hasattr(self.loader, '_files'):
218
+ self._files = self.loader._files
219
+ else:
220
+ self._files = zipimport._zip_directory_cache[archive]
221
+ self.index = sorted(self._files)
222
+
223
+ def _adjust_path(self, path):
224
+ return path
225
+
226
+ def _find(self, path):
227
+ path = path[self.prefix_len:]
228
+ if path in self._files:
229
+ result = True
230
+ else:
231
+ if path and path[-1] != os.sep:
232
+ path = path + os.sep
233
+ i = bisect.bisect(self.index, path)
234
+ try:
235
+ result = self.index[i].startswith(path)
236
+ except IndexError:
237
+ result = False
238
+ if not result:
239
+ logger.debug('_find failed: %r %r', path, self.loader.prefix)
240
+ else:
241
+ logger.debug('_find worked: %r %r', path, self.loader.prefix)
242
+ return result
243
+
244
+ def get_cache_info(self, resource):
245
+ prefix = self.loader.archive
246
+ path = resource.path[1 + len(prefix):]
247
+ return prefix, path
248
+
249
+ def get_bytes(self, resource):
250
+ return self.loader.get_data(resource.path)
251
+
252
+ def get_stream(self, resource):
253
+ return io.BytesIO(self.get_bytes(resource))
254
+
255
+ def get_size(self, resource):
256
+ path = resource.path[self.prefix_len:]
257
+ return self._files[path][3]
258
+
259
+ def get_resources(self, resource):
260
+ path = resource.path[self.prefix_len:]
261
+ if path and path[-1] != os.sep:
262
+ path += os.sep
263
+ plen = len(path)
264
+ result = set()
265
+ i = bisect.bisect(self.index, path)
266
+ while i < len(self.index):
267
+ if not self.index[i].startswith(path):
268
+ break
269
+ s = self.index[i][plen:]
270
+ result.add(s.split(os.sep, 1)[0]) # only immediate children
271
+ i += 1
272
+ return result
273
+
274
+ def _is_directory(self, path):
275
+ path = path[self.prefix_len:]
276
+ if path and path[-1] != os.sep:
277
+ path += os.sep
278
+ i = bisect.bisect(self.index, path)
279
+ try:
280
+ result = self.index[i].startswith(path)
281
+ except IndexError:
282
+ result = False
283
+ return result
284
+
285
+
286
+ _finder_registry = {
287
+ type(None): ResourceFinder,
288
+ zipimport.zipimporter: ZipResourceFinder
289
+ }
290
+
291
+ try:
292
+ # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
293
+ try:
294
+ import _frozen_importlib_external as _fi
295
+ except ImportError:
296
+ import _frozen_importlib as _fi
297
+ _finder_registry[_fi.SourceFileLoader] = ResourceFinder
298
+ _finder_registry[_fi.FileFinder] = ResourceFinder
299
+ # See issue #146
300
+ _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder
301
+ del _fi
302
+ except (ImportError, AttributeError):
303
+ pass
304
+
305
+
306
+ def register_finder(loader, finder_maker):
307
+ _finder_registry[type(loader)] = finder_maker
308
+
309
+
310
+ _finder_cache = {}
311
+
312
+
313
+ def finder(package):
314
+ """
315
+ Return a resource finder for a package.
316
+ :param package: The name of the package.
317
+ :return: A :class:`ResourceFinder` instance for the package.
318
+ """
319
+ if package in _finder_cache:
320
+ result = _finder_cache[package]
321
+ else:
322
+ if package not in sys.modules:
323
+ __import__(package)
324
+ module = sys.modules[package]
325
+ path = getattr(module, '__path__', None)
326
+ if path is None:
327
+ raise DistlibException('You cannot get a finder for a module, '
328
+ 'only for a package')
329
+ loader = getattr(module, '__loader__', None)
330
+ finder_maker = _finder_registry.get(type(loader))
331
+ if finder_maker is None:
332
+ raise DistlibException('Unable to locate finder for %r' % package)
333
+ result = finder_maker(module)
334
+ _finder_cache[package] = result
335
+ return result
336
+
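+ # Example (hedged; 'mypackage' and 'data.json' are hypothetical names):
+ # reading a bundled data file through the finder API.
+ #
+ # f = finder('mypackage')
+ # r = f.find('data.json')       # Resource, ResourceContainer or None
+ # if r is not None and not r.is_container:
+ #     data = r.bytes            # raw contents
+ #     path = r.file_path        # materialised in the resource cache
+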
337
+
338
+ _dummy_module = types.ModuleType(str('__dummy__'))
339
+
340
+
341
+ def finder_for_path(path):
342
+ """
343
+ Return a resource finder for a path, which should represent a container.
344
+
345
+ :param path: The path.
346
+ :return: A :class:`ResourceFinder` instance for the path.
347
+ """
348
+ result = None
349
+ # calls any path hooks, gets importer into cache
350
+ pkgutil.get_importer(path)
351
+ loader = sys.path_importer_cache.get(path)
352
+ finder = _finder_registry.get(type(loader))
353
+ if finder:
354
+ module = _dummy_module
355
+ module.__file__ = os.path.join(path, '')
356
+ module.__loader__ = loader
357
+ result = finder(module)
358
+ return result
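+ # Sketch of finder_for_path usage (path and filename hypothetical):
+ # given an importable directory or zipfile path, obtain a finder
+ # without importing a package first.
+ #
+ # ff = finder_for_path('/path/to/some/dir')
+ # if ff is not None:
+ #     res = ff.find('somefile.txt')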
venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py ADDED
@@ -0,0 +1,429 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2013-2015 Vinay Sajip.
4
+ # Licensed to the Python Software Foundation under a contributor agreement.
5
+ # See LICENSE.txt and CONTRIBUTORS.txt.
6
+ #
7
+ from io import BytesIO
8
+ import logging
9
+ import os
10
+ import re
11
+ import struct
12
+ import sys
13
+
14
+ from .compat import sysconfig, detect_encoding, ZipFile
15
+ from .resources import finder
16
+ from .util import (FileOperator, get_export_entry, convert_path,
17
+ get_executable, get_platform, in_venv)
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+ _DEFAULT_MANIFEST = '''
22
+ <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
23
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
24
+ <assemblyIdentity version="1.0.0.0"
25
+ processorArchitecture="X86"
26
+ name="%s"
27
+ type="win32"/>
28
+
29
+ <!-- Identify the application security requirements. -->
30
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
31
+ <security>
32
+ <requestedPrivileges>
33
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
34
+ </requestedPrivileges>
35
+ </security>
36
+ </trustInfo>
37
+ </assembly>'''.strip()
38
+
39
+ # check if Python is called on the first line with this expression
40
+ FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
41
+ SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
42
+ import re
43
+ import sys
44
+ from %(module)s import %(import_name)s
45
+ if __name__ == '__main__':
46
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
47
+ sys.exit(%(func)s())
48
+ '''
49
+
50
+
51
+ def enquote_executable(executable):
52
+ if ' ' in executable:
53
+ # make sure we quote only the executable in case of env
54
+ # for example /usr/bin/env "/dir with spaces/bin/jython"
55
+ # instead of "/usr/bin/env /dir with spaces/bin/jython"
56
+ # otherwise the whole command line would be quoted
57
+ if executable.startswith('/usr/bin/env '):
58
+ env, _executable = executable.split(' ', 1)
59
+ if ' ' in _executable and not _executable.startswith('"'):
60
+ executable = '%s "%s"' % (env, _executable)
61
+ else:
62
+ if not executable.startswith('"'):
63
+ executable = '"%s"' % executable
64
+ return executable
65
+
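+ # For illustration (not part of distlib): enquote_executable leaves
+ # space-free paths alone and quotes only the interpreter part of an
+ # env invocation.
+ #
+ # enquote_executable('/usr/bin/python3')
+ #     -> '/usr/bin/python3'
+ # enquote_executable('/opt/my python/bin/python')
+ #     -> '"/opt/my python/bin/python"'
+ # enquote_executable('/usr/bin/env /opt/my python/bin/python')
+ #     -> '/usr/bin/env "/opt/my python/bin/python"'
+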
66
+ # Keep the old name around (for now), as there is at least one project using it!
67
+ _enquote_executable = enquote_executable
68
+
69
+ class ScriptMaker(object):
70
+ """
71
+ A class to copy or create scripts from source scripts or callable
72
+ specifications.
73
+ """
74
+ script_template = SCRIPT_TEMPLATE
75
+
76
+ executable = None # for shebangs
77
+
78
+ def __init__(self, source_dir, target_dir, add_launchers=True,
79
+ dry_run=False, fileop=None):
80
+ self.source_dir = source_dir
81
+ self.target_dir = target_dir
82
+ self.add_launchers = add_launchers
83
+ self.force = False
84
+ self.clobber = False
85
+ # It only makes sense to set mode bits on POSIX.
86
+ self.set_mode = (os.name == 'posix') or (os.name == 'java' and
87
+ os._name == 'posix')
88
+ self.variants = set(('', 'X.Y'))
89
+ self._fileop = fileop or FileOperator(dry_run)
90
+
91
+ self._is_nt = os.name == 'nt' or (
92
+ os.name == 'java' and os._name == 'nt')
93
+ self.version_info = sys.version_info
94
+
95
+ def _get_alternate_executable(self, executable, options):
96
+ if options.get('gui', False) and self._is_nt: # pragma: no cover
97
+ dn, fn = os.path.split(executable)
98
+ fn = fn.replace('python', 'pythonw')
99
+ executable = os.path.join(dn, fn)
100
+ return executable
101
+
102
+ if sys.platform.startswith('java'): # pragma: no cover
103
+ def _is_shell(self, executable):
104
+ """
105
+ Determine if the specified executable is a script
106
+ (contains a #! line)
107
+ """
108
+ try:
109
+ with open(executable) as fp:
110
+ return fp.read(2) == '#!'
111
+ except (OSError, IOError):
112
+ logger.warning('Failed to open %s', executable)
113
+ return False
114
+
115
+ def _fix_jython_executable(self, executable):
116
+ if self._is_shell(executable):
117
+ # The Jython workaround is not needed on Linux systems.
118
+ import java
119
+
120
+ if java.lang.System.getProperty('os.name') == 'Linux':
121
+ return executable
122
+ elif executable.lower().endswith('jython.exe'):
123
+ # Use wrapper exe for Jython on Windows
124
+ return executable
125
+ return '/usr/bin/env %s' % executable
126
+
127
+ def _build_shebang(self, executable, post_interp):
128
+ """
129
+ Build a shebang line. In the simple case (on Windows, or a shebang line
130
+ which is not too long or contains spaces) use a simple formulation for
131
+ the shebang. Otherwise, use /bin/sh as the executable, with a contrived
132
+ shebang which allows the script to run either under Python or sh, using
133
+ suitable quoting. Thanks to Harald Nordgren for his input.
134
+
135
+ See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
136
+ https://hg.mozilla.org/mozilla-central/file/tip/mach
137
+ """
138
+ if os.name != 'posix':
139
+ simple_shebang = True
140
+ else:
141
+ # Add 3 for '#!' prefix and newline suffix.
142
+ shebang_length = len(executable) + len(post_interp) + 3
143
+ if sys.platform == 'darwin':
144
+ max_shebang_length = 512
145
+ else:
146
+ max_shebang_length = 127
147
+ simple_shebang = ((b' ' not in executable) and
148
+ (shebang_length <= max_shebang_length))
149
+
150
+ if simple_shebang:
151
+ result = b'#!' + executable + post_interp + b'\n'
152
+ else:
153
+ result = b'#!/bin/sh\n'
154
+ result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
155
+ result += b"' '''"
156
+ return result
157
+
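+ # For a too-long POSIX shebang, the result is a polyglot that both
+ # /bin/sh and Python accept; illustrative output (path shortened):
+ #
+ # #!/bin/sh
+ # '''exec' /very/long/path/to/python "$0" "$@"
+ # ' '''
+ #
+ # sh sees ''+'exec' and re-execs the interpreter; Python sees a
+ # docstring and continues with the script body.
+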
158
+ def _get_shebang(self, encoding, post_interp=b'', options=None):
159
+ enquote = True
160
+ if self.executable:
161
+ executable = self.executable
162
+ enquote = False # assume this will be taken care of
163
+ elif not sysconfig.is_python_build():
164
+ executable = get_executable()
165
+ elif in_venv(): # pragma: no cover
166
+ executable = os.path.join(sysconfig.get_path('scripts'),
167
+ 'python%s' % sysconfig.get_config_var('EXE'))
168
+ else: # pragma: no cover
169
+ executable = os.path.join(
170
+ sysconfig.get_config_var('BINDIR'),
171
+ 'python%s%s' % (sysconfig.get_config_var('VERSION'),
172
+ sysconfig.get_config_var('EXE')))
173
+ if not os.path.isfile(executable):
174
+ # for Python builds from source on Windows, no Python executables with
175
+ # a version suffix are created, so we use python.exe
176
+ executable = os.path.join(sysconfig.get_config_var('BINDIR'),
177
+ 'python%s' % (sysconfig.get_config_var('EXE')))
178
+ if options:
179
+ executable = self._get_alternate_executable(executable, options)
180
+
181
+ if sys.platform.startswith('java'): # pragma: no cover
182
+ executable = self._fix_jython_executable(executable)
183
+
184
+ # Normalise case for Windows - COMMENTED OUT
185
+ # executable = os.path.normcase(executable)
186
+ # N.B. The normalising operation above has been commented out: See
187
+ # issue #124. Although paths in Windows are generally case-insensitive,
188
+ # they aren't always. For example, a path containing a ẞ (which is a
189
+ # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
190
+ # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by
191
+ # Windows as equivalent in path names.
192
+
193
+ # If the user didn't specify an executable, it may be necessary to
194
+ # cater for executable paths with spaces (not uncommon on Windows)
195
+ if enquote:
196
+ executable = enquote_executable(executable)
197
+ # Issue #51: don't use fsencode, since we later try to
198
+ # check that the shebang is decodable using utf-8.
199
+ executable = executable.encode('utf-8')
200
+ # in case of IronPython, play safe and enable frames support
201
+ if (sys.platform == 'cli' and '-X:Frames' not in post_interp
202
+ and '-X:FullFrames' not in post_interp): # pragma: no cover
203
+ post_interp += b' -X:Frames'
204
+ shebang = self._build_shebang(executable, post_interp)
205
+ # Python parser starts to read a script using UTF-8 until
206
+ # it gets a #coding:xxx cookie. The shebang has to be the
207
+ # first line of a file, the #coding:xxx cookie cannot be
208
+ # written before. So the shebang has to be decodable from
209
+ # UTF-8.
210
+ try:
211
+ shebang.decode('utf-8')
212
+ except UnicodeDecodeError: # pragma: no cover
213
+ raise ValueError(
214
+ 'The shebang (%r) is not decodable from utf-8' % shebang)
215
+ # If the script is encoded to a custom encoding (use a
216
+ # #coding:xxx cookie), the shebang has to be decodable from
217
+ # the script encoding too.
218
+ if encoding != 'utf-8':
219
+ try:
220
+ shebang.decode(encoding)
221
+ except UnicodeDecodeError: # pragma: no cover
222
+ raise ValueError(
223
+ 'The shebang (%r) is not decodable '
224
+ 'from the script encoding (%r)' % (shebang, encoding))
225
+ return shebang
226
+
227
+ def _get_script_text(self, entry):
228
+ return self.script_template % dict(module=entry.prefix,
229
+ import_name=entry.suffix.split('.')[0],
230
+ func=entry.suffix)
231
+
232
+ manifest = _DEFAULT_MANIFEST
233
+
234
+ def get_manifest(self, exename):
235
+ base = os.path.basename(exename)
236
+ return self.manifest % base
237
+
238
+ def _write_script(self, names, shebang, script_bytes, filenames, ext):
239
+ use_launcher = self.add_launchers and self._is_nt
240
+ linesep = os.linesep.encode('utf-8')
241
+ if not shebang.endswith(linesep):
242
+ shebang += linesep
243
+ if not use_launcher:
244
+ script_bytes = shebang + script_bytes
245
+ else: # pragma: no cover
246
+ if ext == 'py':
247
+ launcher = self._get_launcher('t')
248
+ else:
249
+ launcher = self._get_launcher('w')
250
+ stream = BytesIO()
251
+ with ZipFile(stream, 'w') as zf:
252
+ zf.writestr('__main__.py', script_bytes)
253
+ zip_data = stream.getvalue()
254
+ script_bytes = launcher + shebang + zip_data
255
+ for name in names:
256
+ outname = os.path.join(self.target_dir, name)
257
+ if use_launcher: # pragma: no cover
258
+ n, e = os.path.splitext(outname)
259
+ if e.startswith('.py'):
260
+ outname = n
261
+ outname = '%s.exe' % outname
262
+ try:
263
+ self._fileop.write_binary_file(outname, script_bytes)
264
+ except Exception:
265
+ # Failed writing an executable - it might be in use.
266
+ logger.warning('Failed to write executable - trying to '
267
+ 'use .deleteme logic')
268
+ dfname = '%s.deleteme' % outname
269
+ if os.path.exists(dfname):
270
+ os.remove(dfname) # Not allowed to fail here
271
+ os.rename(outname, dfname) # nor here
272
+ self._fileop.write_binary_file(outname, script_bytes)
273
+ logger.debug('Able to replace executable using '
274
+ '.deleteme logic')
275
+ try:
276
+ os.remove(dfname)
277
+ except Exception:
278
+ pass # still in use - ignore error
279
+ else:
280
+ if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
281
+ outname = '%s.%s' % (outname, ext)
282
+ if os.path.exists(outname) and not self.clobber:
283
+ logger.warning('Skipping existing file %s', outname)
284
+ continue
285
+ self._fileop.write_binary_file(outname, script_bytes)
286
+ if self.set_mode:
287
+ self._fileop.set_executable_mode([outname])
288
+ filenames.append(outname)
289
+
290
+ variant_separator = '-'
291
+
292
+ def get_script_filenames(self, name):
293
+ result = set()
294
+ if '' in self.variants:
295
+ result.add(name)
296
+ if 'X' in self.variants:
297
+ result.add('%s%s' % (name, self.version_info[0]))
298
+ if 'X.Y' in self.variants:
299
+ result.add('%s%s%s.%s' % (name, self.variant_separator,
300
+ self.version_info[0], self.version_info[1]))
301
+ return result
302
+
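+ # Example (illustrative): with the default variants {'', 'X.Y'} on
+ # CPython 3.11, get_script_filenames('foo') would yield
+ # {'foo', 'foo-3.11'}; adding the 'X' variant also yields 'foo3'.
+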
303
+ def _make_script(self, entry, filenames, options=None):
304
+ post_interp = b''
305
+ if options:
306
+ args = options.get('interpreter_args', [])
307
+ if args:
308
+ args = ' %s' % ' '.join(args)
309
+ post_interp = args.encode('utf-8')
310
+ shebang = self._get_shebang('utf-8', post_interp, options=options)
311
+ script = self._get_script_text(entry).encode('utf-8')
312
+ scriptnames = self.get_script_filenames(entry.name)
313
+ if options and options.get('gui', False):
314
+ ext = 'pyw'
315
+ else:
316
+ ext = 'py'
317
+ self._write_script(scriptnames, shebang, script, filenames, ext)
318
+
319
+ def _copy_script(self, script, filenames):
320
+ adjust = False
321
+ script = os.path.join(self.source_dir, convert_path(script))
322
+ outname = os.path.join(self.target_dir, os.path.basename(script))
323
+ if not self.force and not self._fileop.newer(script, outname):
324
+ logger.debug('not copying %s (up-to-date)', script)
325
+ return
326
+
327
+ # Always open the file, but ignore failures in dry-run mode --
328
+ # that way, we'll get accurate feedback if we can read the
329
+ # script.
330
+ try:
331
+ f = open(script, 'rb')
332
+ except IOError: # pragma: no cover
333
+ if not self.dry_run:
334
+ raise
335
+ f = None
336
+ else:
337
+ first_line = f.readline()
338
+ if not first_line: # pragma: no cover
339
+ logger.warning('%s is an empty file (skipping)', script)
340
+ return
341
+
342
+ match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
343
+ if match:
344
+ adjust = True
345
+ post_interp = match.group(1) or b''
346
+
347
+ if not adjust:
348
+ if f:
349
+ f.close()
350
+ self._fileop.copy_file(script, outname)
351
+ if self.set_mode:
352
+ self._fileop.set_executable_mode([outname])
353
+ filenames.append(outname)
354
+ else:
355
+ logger.info('copying and adjusting %s -> %s', script,
356
+ self.target_dir)
357
+ if not self._fileop.dry_run:
358
+ encoding, lines = detect_encoding(f.readline)
359
+ f.seek(0)
360
+ shebang = self._get_shebang(encoding, post_interp)
361
+ if b'pythonw' in first_line: # pragma: no cover
362
+ ext = 'pyw'
363
+ else:
364
+ ext = 'py'
365
+ n = os.path.basename(outname)
366
+ self._write_script([n], shebang, f.read(), filenames, ext)
367
+ if f:
368
+ f.close()
369
+
370
+ @property
371
+ def dry_run(self):
372
+ return self._fileop.dry_run
373
+
374
+ @dry_run.setter
375
+ def dry_run(self, value):
376
+ self._fileop.dry_run = value
377
+
378
+ if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
379
+ # Executable launcher support.
380
+ # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
381
+
382
+ def _get_launcher(self, kind):
383
+ if struct.calcsize('P') == 8: # 64-bit
384
+ bits = '64'
385
+ else:
386
+ bits = '32'
387
+ platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
388
+ name = '%s%s%s.exe' % (kind, bits, platform_suffix)
389
+ # Issue 31: don't hardcode an absolute package name, but
390
+ # determine it relative to the current package
391
+ distlib_package = __name__.rsplit('.', 1)[0]
392
+ resource = finder(distlib_package).find(name)
393
+ if not resource:
394
+ msg = ('Unable to find resource %s in package %s' % (name,
395
+ distlib_package))
396
+ raise ValueError(msg)
397
+ return resource.bytes
398
+
399
+ # Public API follows
400
+
401
+ def make(self, specification, options=None):
402
+ """
403
+ Make a script.
404
+
405
+ :param specification: The specification, which is either a valid export
406
+ entry specification (to make a script from a
407
+ callable) or a filename (to make a script by
408
+ copying from a source location).
409
+ :param options: A dictionary of options controlling script generation.
410
+ :return: A list of all absolute pathnames written to.
411
+ """
412
+ filenames = []
413
+ entry = get_export_entry(specification)
414
+ if entry is None:
415
+ self._copy_script(specification, filenames)
416
+ else:
417
+ self._make_script(entry, filenames, options=options)
418
+ return filenames
419
+
420
+ def make_multiple(self, specifications, options=None):
421
+ """
422
+ Take a list of specifications and make scripts from them.
423
+ :param specifications: A list of specifications.
424
+ :return: A list of all absolute pathnames written to.
425
+ """
426
+ filenames = []
427
+ for specification in specifications:
428
+ filenames.extend(self.make(specification, options))
429
+ return filenames
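+ # Typical usage (a sketch; the target directory and the export-entry
+ # specification 'hello = mypkg.cli:main' are hypothetical):
+ #
+ # maker = ScriptMaker(None, '/tmp/bin')
+ # written = maker.make('hello = mypkg.cli:main')
+ # # 'written' lists the script paths created in /tmp/bin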
venv/lib/python3.10/site-packages/pip/_vendor/distlib/util.py ADDED
@@ -0,0 +1,1932 @@
1
+ #
2
+ # Copyright (C) 2012-2021 The Python Software Foundation.
3
+ # See LICENSE.txt and CONTRIBUTORS.txt.
4
+ #
5
+ import codecs
6
+ from collections import deque
7
+ import contextlib
8
+ import csv
9
+ from glob import iglob as std_iglob
10
+ import io
11
+ import json
12
+ import logging
13
+ import os
14
+ import py_compile
15
+ import re
16
+ import socket
17
+ try:
18
+ import ssl
19
+ except ImportError: # pragma: no cover
20
+ ssl = None
21
+ import subprocess
22
+ import sys
23
+ import tarfile
24
+ import tempfile
25
+ import textwrap
26
+
27
+ try:
28
+ import threading
29
+ except ImportError: # pragma: no cover
30
+ import dummy_threading as threading
31
+ import time
32
+
33
+ from . import DistlibException
34
+ from .compat import (string_types, text_type, shutil, raw_input, StringIO,
35
+ cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
36
+ splittype, HTTPHandler, BaseConfigurator, valid_ident,
37
+ Container, configparser, URLError, ZipFile, fsdecode,
38
+ unquote, urlparse)
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
+ #
43
+ # Requirement parsing code as per PEP 508
44
+ #
45
+
46
+ IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
47
+ VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
48
+ COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
49
+ MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
50
+ OR = re.compile(r'^or\b\s*')
51
+ AND = re.compile(r'^and\b\s*')
52
+ NON_SPACE = re.compile(r'(\S+)\s*')
53
+ STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
54
+
55
+
56
+ def parse_marker(marker_string):
57
+ """
58
+ Parse a marker string and return a dictionary containing a marker expression.
59
+
60
+ The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
61
+ the expression grammar, or strings. A string contained in quotes is to be
62
+ interpreted as a literal string, and a string not contained in quotes is a
63
+ variable (such as os_name).
64
+ """
65
+ def marker_var(remaining):
66
+ # either identifier, or literal string
67
+ m = IDENTIFIER.match(remaining)
68
+ if m:
69
+ result = m.groups()[0]
70
+ remaining = remaining[m.end():]
71
+ elif not remaining:
72
+ raise SyntaxError('unexpected end of input')
73
+ else:
74
+ q = remaining[0]
75
+ if q not in '\'"':
76
+ raise SyntaxError('invalid expression: %s' % remaining)
77
+ oq = '\'"'.replace(q, '')
78
+ remaining = remaining[1:]
79
+ parts = [q]
80
+ while remaining:
81
+ # either a string chunk, or oq, or q to terminate
82
+ if remaining[0] == q:
83
+ break
84
+ elif remaining[0] == oq:
85
+ parts.append(oq)
86
+ remaining = remaining[1:]
87
+ else:
88
+ m = STRING_CHUNK.match(remaining)
89
+ if not m:
90
+ raise SyntaxError('error in string literal: %s' % remaining)
91
+ parts.append(m.groups()[0])
92
+ remaining = remaining[m.end():]
93
+ else:
94
+ s = ''.join(parts)
95
+ raise SyntaxError('unterminated string: %s' % s)
96
+ parts.append(q)
97
+ result = ''.join(parts)
98
+ remaining = remaining[1:].lstrip() # skip past closing quote
99
+ return result, remaining
100
+
101
+ def marker_expr(remaining):
102
+ if remaining and remaining[0] == '(':
103
+ result, remaining = marker(remaining[1:].lstrip())
104
+ if remaining[0] != ')':
105
+ raise SyntaxError('unterminated parenthesis: %s' % remaining)
106
+ remaining = remaining[1:].lstrip()
107
+ else:
108
+ lhs, remaining = marker_var(remaining)
109
+ while remaining:
110
+ m = MARKER_OP.match(remaining)
111
+ if not m:
112
+ break
113
+ op = m.groups()[0]
114
+ remaining = remaining[m.end():]
115
+ rhs, remaining = marker_var(remaining)
116
+ lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
117
+ result = lhs
118
+ return result, remaining
119
+
120
+ def marker_and(remaining):
121
+ lhs, remaining = marker_expr(remaining)
122
+ while remaining:
123
+ m = AND.match(remaining)
124
+ if not m:
125
+ break
126
+ remaining = remaining[m.end():]
127
+ rhs, remaining = marker_expr(remaining)
128
+ lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
129
+ return lhs, remaining
130
+
131
+ def marker(remaining):
132
+ lhs, remaining = marker_and(remaining)
133
+ while remaining:
134
+ m = OR.match(remaining)
135
+ if not m:
136
+ break
137
+ remaining = remaining[m.end():]
138
+ rhs, remaining = marker_and(remaining)
139
+ lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
140
+ return lhs, remaining
141
+
142
+ return marker(marker_string)
143
+
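+ # Example (illustrative): parse_marker returns the parsed expression
+ # and any unparsed remainder, e.g.
+ #
+ # parse_marker('python_version >= "3.6"')
+ #     -> ({'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'}, '')
+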
144
+
145
+ def parse_requirement(req):
146
+ """
147
+ Parse a requirement passed in as a string. Return a Container
148
+ whose attributes contain the various parts of the requirement.
149
+ """
150
+ remaining = req.strip()
151
+ if not remaining or remaining.startswith('#'):
152
+ return None
153
+ m = IDENTIFIER.match(remaining)
154
+ if not m:
155
+ raise SyntaxError('name expected: %s' % remaining)
156
+ distname = m.groups()[0]
157
+ remaining = remaining[m.end():]
158
+ extras = mark_expr = versions = uri = None
159
+ if remaining and remaining[0] == '[':
160
+ i = remaining.find(']', 1)
161
+ if i < 0:
162
+ raise SyntaxError('unterminated extra: %s' % remaining)
163
+ s = remaining[1:i]
164
+ remaining = remaining[i + 1:].lstrip()
165
+ extras = []
166
+ while s:
167
+ m = IDENTIFIER.match(s)
168
+ if not m:
169
+ raise SyntaxError('malformed extra: %s' % s)
170
+ extras.append(m.groups()[0])
171
+ s = s[m.end():]
172
+ if not s:
173
+ break
174
+ if s[0] != ',':
175
+ raise SyntaxError('comma expected in extras: %s' % s)
176
+ s = s[1:].lstrip()
177
+ if not extras:
178
+ extras = None
179
+ if remaining:
180
+ if remaining[0] == '@':
181
+ # it's a URI
182
+ remaining = remaining[1:].lstrip()
183
+ m = NON_SPACE.match(remaining)
184
+ if not m:
185
+ raise SyntaxError('invalid URI: %s' % remaining)
186
+ uri = m.groups()[0]
187
+ t = urlparse(uri)
188
+ # there are issues with Python and URL parsing, so this test
189
+ # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
190
+ # always parse invalid URLs correctly - it should raise
191
+ # exceptions for malformed URLs
192
+ if not (t.scheme and t.netloc):
193
+ raise SyntaxError('Invalid URL: %s' % uri)
194
+ remaining = remaining[m.end():].lstrip()
195
+ else:
196
+
197
+ def get_versions(ver_remaining):
198
+ """
199
+ Return a list of operator, version tuples if any are
200
+ specified, else None.
201
+ """
202
+ m = COMPARE_OP.match(ver_remaining)
203
+ versions = None
204
+ if m:
205
+ versions = []
206
+ while True:
207
+ op = m.groups()[0]
208
+ ver_remaining = ver_remaining[m.end():]
209
+ m = VERSION_IDENTIFIER.match(ver_remaining)
210
+ if not m:
211
+ raise SyntaxError('invalid version: %s' % ver_remaining)
212
+ v = m.groups()[0]
213
+ versions.append((op, v))
214
+ ver_remaining = ver_remaining[m.end():]
215
+ if not ver_remaining or ver_remaining[0] != ',':
216
+ break
217
+ ver_remaining = ver_remaining[1:].lstrip()
218
+ # Some packages have a trailing comma which would break things
219
+ # See issue #148
220
+ if not ver_remaining:
221
+ break
222
+ m = COMPARE_OP.match(ver_remaining)
223
+ if not m:
224
+ raise SyntaxError('invalid constraint: %s' % ver_remaining)
225
+ if not versions:
226
+ versions = None
227
+ return versions, ver_remaining
228
+
229
+ if remaining[0] != '(':
230
+ versions, remaining = get_versions(remaining)
231
+ else:
232
+ i = remaining.find(')', 1)
233
+ if i < 0:
234
+ raise SyntaxError('unterminated parenthesis: %s' % remaining)
235
+ s = remaining[1:i]
236
+ remaining = remaining[i + 1:].lstrip()
237
+ # As a special diversion from PEP 508, allow a version number
238
+ # a.b.c in parentheses as a synonym for ~= a.b.c (because this
239
+ # is allowed in earlier PEPs)
240
+ if COMPARE_OP.match(s):
241
+ versions, _ = get_versions(s)
242
+ else:
243
+ m = VERSION_IDENTIFIER.match(s)
244
+ if not m:
245
+ raise SyntaxError('invalid constraint: %s' % s)
246
+ v = m.groups()[0]
247
+ s = s[m.end():].lstrip()
248
+ if s:
249
+ raise SyntaxError('invalid constraint: %s' % s)
250
+ versions = [('~=', v)]
251
+
252
+ if remaining:
253
+ if remaining[0] != ';':
254
+ raise SyntaxError('invalid requirement: %s' % remaining)
255
+ remaining = remaining[1:].lstrip()
256
+
257
+ mark_expr, remaining = parse_marker(remaining)
258
+
259
+ if remaining and remaining[0] != '#':
260
+ raise SyntaxError('unexpected trailing data: %s' % remaining)
261
+
262
+ if not versions:
263
+ rs = distname
264
+ else:
265
+ rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
266
+ return Container(name=distname, extras=extras, constraints=versions,
267
+ marker=mark_expr, url=uri, requirement=rs)
268
+
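+ # Example (illustrative):
+ #
+ # r = parse_requirement('foo[bar] >= 1.0; python_version >= "3.6"')
+ # r.name         -> 'foo'
+ # r.extras       -> ['bar']
+ # r.constraints  -> [('>=', '1.0')]
+ # r.requirement  -> 'foo >= 1.0'
+ # r.marker       -> {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'}
+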
269
+
270
+ def get_resources_dests(resources_root, rules):
271
+ """Find destinations for resources files"""
272
+
273
+ def get_rel_path(root, path):
274
+ # normalizes and returns a lstripped-/-separated path
275
+ root = root.replace(os.path.sep, '/')
276
+ path = path.replace(os.path.sep, '/')
277
+ assert path.startswith(root)
278
+ return path[len(root):].lstrip('/')
279
+
280
+ destinations = {}
281
+ for base, suffix, dest in rules:
282
+ prefix = os.path.join(resources_root, base)
283
+ for abs_base in iglob(prefix):
284
+ abs_glob = os.path.join(abs_base, suffix)
285
+ for abs_path in iglob(abs_glob):
286
+ resource_file = get_rel_path(resources_root, abs_path)
287
+ if dest is None: # remove the entry if it was here
288
+ destinations.pop(resource_file, None)
289
+ else:
290
+ rel_path = get_rel_path(abs_base, abs_path)
291
+ rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
292
+ destinations[resource_file] = rel_dest + '/' + rel_path
293
+ return destinations
294
+
295
+
296
+ def in_venv():
297
+ if hasattr(sys, 'real_prefix'):
298
+ # virtualenv venvs
299
+ result = True
300
+ else:
301
+ # PEP 405 venvs
302
+ result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
303
+ return result
304
+
305
+
306
+ def get_executable():
307
+ # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
308
+ # changes to the stub launcher mean that sys.executable always points
309
+ # to the stub on OS X
310
+ # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
311
+ # in os.environ):
312
+ # result = os.environ['__PYVENV_LAUNCHER__']
313
+ # else:
314
+ # result = sys.executable
315
+ # return result
316
+ # Avoid normcasing: see issue #143
317
+ # result = os.path.normcase(sys.executable)
318
+ result = sys.executable
319
+ if not isinstance(result, text_type):
320
+ result = fsdecode(result)
321
+ return result
322
+
323
+
324
+ def proceed(prompt, allowed_chars, error_prompt=None, default=None):
325
+ p = prompt
326
+ while True:
327
+ s = raw_input(p)
328
+ p = prompt
329
+ if not s and default:
330
+ s = default
331
+ if s:
332
+ c = s[0].lower()
333
+ if c in allowed_chars:
334
+ break
335
+ if error_prompt:
336
+ p = '%c: %s\n%s' % (c, error_prompt, prompt)
337
+ return c
338
+
339
+
340
+ def extract_by_key(d, keys):
341
+ if isinstance(keys, string_types):
342
+ keys = keys.split()
343
+ result = {}
344
+ for key in keys:
345
+ if key in d:
346
+ result[key] = d[key]
347
+ return result
348
+
349
+ def read_exports(stream):
350
+ if sys.version_info[0] >= 3:
351
+ # needs to be a text stream
352
+ stream = codecs.getreader('utf-8')(stream)
353
+ # Try to load as JSON, falling back on legacy format
354
+ data = stream.read()
355
+ stream = StringIO(data)
356
+ try:
357
+ jdata = json.load(stream)
358
+ result = jdata['extensions']['python.exports']['exports']
359
+ for group, entries in result.items():
360
+ for k, v in entries.items():
361
+ s = '%s = %s' % (k, v)
362
+ entry = get_export_entry(s)
363
+ assert entry is not None
364
+ entries[k] = entry
365
+ return result
366
+ except Exception:
367
+ stream.seek(0, 0)
368
+
369
+ def read_stream(cp, stream):
370
+ if hasattr(cp, 'read_file'):
371
+ cp.read_file(stream)
372
+ else:
373
+ cp.readfp(stream)
374
+
375
+ cp = configparser.ConfigParser()
376
+ try:
377
+ read_stream(cp, stream)
378
+ except configparser.MissingSectionHeaderError:
379
+ stream.close()
380
+ data = textwrap.dedent(data)
381
+ stream = StringIO(data)
382
+ read_stream(cp, stream)
383
+
384
+ result = {}
385
+ for key in cp.sections():
386
+ result[key] = entries = {}
387
+ for name, value in cp.items(key):
388
+ s = '%s = %s' % (name, value)
389
+ entry = get_export_entry(s)
390
+ assert entry is not None
391
+ #entry.dist = self
392
+ entries[name] = entry
393
+ return result
394
+
395
+
396
+ def write_exports(exports, stream):
397
+ if sys.version_info[0] >= 3:
398
+ # needs to be a text stream
399
+ stream = codecs.getwriter('utf-8')(stream)
400
+ cp = configparser.ConfigParser()
401
+ for k, v in exports.items():
402
+ # TODO check k, v for valid values
403
+ cp.add_section(k)
404
+ for entry in v.values():
405
+ if entry.suffix is None:
406
+ s = entry.prefix
407
+ else:
408
+ s = '%s:%s' % (entry.prefix, entry.suffix)
409
+ if entry.flags:
410
+ s = '%s [%s]' % (s, ', '.join(entry.flags))
411
+ cp.set(k, entry.name, s)
412
+ cp.write(stream)
413
+
414
+
415
+ @contextlib.contextmanager
416
+ def tempdir():
417
+ td = tempfile.mkdtemp()
418
+ try:
419
+ yield td
420
+ finally:
421
+ shutil.rmtree(td)
422
+
423
+ @contextlib.contextmanager
424
+ def chdir(d):
425
+ cwd = os.getcwd()
426
+ try:
427
+ os.chdir(d)
428
+ yield
429
+ finally:
430
+ os.chdir(cwd)
431
+
432
+
433
+ @contextlib.contextmanager
434
+ def socket_timeout(seconds=15):
435
+ cto = socket.getdefaulttimeout()
436
+ try:
437
+ socket.setdefaulttimeout(seconds)
438
+ yield
439
+ finally:
440
+ socket.setdefaulttimeout(cto)
441
+
442
+
443
+ class cached_property(object):
444
+ def __init__(self, func):
445
+ self.func = func
446
+ #for attr in ('__name__', '__module__', '__doc__'):
447
+ # setattr(self, attr, getattr(func, attr, None))
448
+
449
+ def __get__(self, obj, cls=None):
450
+ if obj is None:
451
+ return self
452
+ value = self.func(obj)
453
+ object.__setattr__(obj, self.func.__name__, value)
454
+ #obj.__dict__[self.func.__name__] = value = self.func(obj)
455
+ return value
456
+
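+ # Note (added commentary, not upstream): cached_property is a non-data
+ # descriptor (it defines only __get__), so the object.__setattr__ call
+ # above shadows the descriptor with an instance attribute; the wrapped
+ # function runs at most once per instance and later lookups are plain
+ # attribute accesses.
+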
457
+ def convert_path(pathname):
458
+ """Return 'pathname' as a name that will work on the native filesystem.
459
+
460
+ The path is split on '/' and put back together again using the current
461
+ directory separator. Needed because filenames in the setup script are
462
+ always supplied in Unix style, and have to be converted to the local
463
+ convention before we can actually use them in the filesystem. Raises
464
+ ValueError on non-Unix-ish systems if 'pathname' either starts or
465
+ ends with a slash.
466
+ """
467
+ if os.sep == '/':
468
+ return pathname
469
+ if not pathname:
470
+ return pathname
471
+ if pathname[0] == '/':
472
+ raise ValueError("path '%s' cannot be absolute" % pathname)
473
+ if pathname[-1] == '/':
474
+ raise ValueError("path '%s' cannot end with '/'" % pathname)
475
+
476
+ paths = pathname.split('/')
477
+ while os.curdir in paths:
478
+ paths.remove(os.curdir)
479
+ if not paths:
480
+ return os.curdir
481
+ return os.path.join(*paths)
482
+
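+ # Example (illustrative): on Windows, convert_path('pkg/data/x.txt')
+ # returns 'pkg\\data\\x.txt'; on POSIX the path is returned unchanged.
+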
483
+
484
+ class FileOperator(object):
485
+ def __init__(self, dry_run=False):
486
+ self.dry_run = dry_run
487
+ self.ensured = set()
488
+ self._init_record()
489
+
490
+ def _init_record(self):
491
+ self.record = False
492
+ self.files_written = set()
493
+ self.dirs_created = set()
494
+
495
+ def record_as_written(self, path):
496
+ if self.record:
497
+ self.files_written.add(path)
498
+
499
+ def newer(self, source, target):
500
+ """Tell if the target is newer than the source.
501
+
502
+ Returns true if 'source' exists and is more recently modified than
503
+ 'target', or if 'source' exists and 'target' doesn't.
504
+
505
+ Returns false if both exist and 'target' is the same age or younger
506
+ than 'source'. Raises DistlibException if 'source' does not exist.
507
+
508
+ Note that this test is not very accurate: files created in the same
509
+ second will have the same "age".
510
+ """
511
+ if not os.path.exists(source):
512
+ raise DistlibException("file '%r' does not exist" %
513
+ os.path.abspath(source))
514
+ if not os.path.exists(target):
515
+ return True
516
+
517
+ return os.stat(source).st_mtime > os.stat(target).st_mtime
518
+
519
+ def copy_file(self, infile, outfile, check=True):
520
+ """Copy a file respecting dry-run and force flags.
521
+ """
522
+ self.ensure_dir(os.path.dirname(outfile))
523
+ logger.info('Copying %s to %s', infile, outfile)
524
+ if not self.dry_run:
525
+ msg = None
526
+ if check:
527
+ if os.path.islink(outfile):
528
+ msg = '%s is a symlink' % outfile
529
+ elif os.path.exists(outfile) and not os.path.isfile(outfile):
530
+ msg = '%s is a non-regular file' % outfile
531
+ if msg:
532
+ raise ValueError(msg + ' which would be overwritten')
533
+ shutil.copyfile(infile, outfile)
534
+ self.record_as_written(outfile)
535
+
536
+ def copy_stream(self, instream, outfile, encoding=None):
537
+ assert not os.path.isdir(outfile)
538
+ self.ensure_dir(os.path.dirname(outfile))
539
+ logger.info('Copying stream %s to %s', instream, outfile)
540
+ if not self.dry_run:
541
+ if encoding is None:
542
+ outstream = open(outfile, 'wb')
543
+ else:
544
+ outstream = codecs.open(outfile, 'w', encoding=encoding)
545
+ try:
546
+ shutil.copyfileobj(instream, outstream)
547
+ finally:
548
+ outstream.close()
549
+ self.record_as_written(outfile)
550
+
551
+ def write_binary_file(self, path, data):
552
+ self.ensure_dir(os.path.dirname(path))
553
+ if not self.dry_run:
554
+ if os.path.exists(path):
555
+ os.remove(path)
556
+ with open(path, 'wb') as f:
557
+ f.write(data)
558
+ self.record_as_written(path)
559
+
560
+ def write_text_file(self, path, data, encoding):
561
+ self.write_binary_file(path, data.encode(encoding))
562
+
563
+ def set_mode(self, bits, mask, files):
564
+ if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
565
+ # Set the executable bits (owner, group, and world) on
566
+ # all the files specified.
567
+ for f in files:
568
+ if self.dry_run:
569
+ logger.info("changing mode of %s", f)
570
+ else:
571
+ mode = (os.stat(f).st_mode | bits) & mask
572
+ logger.info("changing mode of %s to %o", f, mode)
573
+ os.chmod(f, mode)
574
+
575
+ set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
576
+
577
+ def ensure_dir(self, path):
578
+ path = os.path.abspath(path)
579
+ if path not in self.ensured and not os.path.exists(path):
580
+ self.ensured.add(path)
581
+ d, f = os.path.split(path)
582
+ self.ensure_dir(d)
583
+ logger.info('Creating %s' % path)
584
+ if not self.dry_run:
585
+ os.mkdir(path)
586
+ if self.record:
587
+ self.dirs_created.add(path)
588
+
589
+ def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
590
+ dpath = cache_from_source(path, not optimize)
591
+ logger.info('Byte-compiling %s to %s', path, dpath)
592
+ if not self.dry_run:
593
+ if force or self.newer(path, dpath):
594
+ if not prefix:
595
+ diagpath = None
596
+ else:
597
+ assert path.startswith(prefix)
598
+ diagpath = path[len(prefix):]
599
+ compile_kwargs = {}
600
+ if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
601
+ compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
602
+ py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
603
+ self.record_as_written(dpath)
604
+ return dpath
605
+
606
+ def ensure_removed(self, path):
607
+ if os.path.exists(path):
608
+ if os.path.isdir(path) and not os.path.islink(path):
609
+ logger.debug('Removing directory tree at %s', path)
610
+ if not self.dry_run:
611
+ shutil.rmtree(path)
612
+ if self.record:
613
+ if path in self.dirs_created:
614
+ self.dirs_created.remove(path)
615
+ else:
616
+ if os.path.islink(path):
617
+ s = 'link'
618
+ else:
619
+ s = 'file'
620
+ logger.debug('Removing %s %s', s, path)
621
+ if not self.dry_run:
622
+ os.remove(path)
623
+ if self.record:
624
+ if path in self.files_written:
625
+ self.files_written.remove(path)
626
+
627
+ def is_writable(self, path):
628
+ result = False
629
+ while not result:
630
+ if os.path.exists(path):
631
+ result = os.access(path, os.W_OK)
632
+ break
633
+ parent = os.path.dirname(path)
634
+ if parent == path:
635
+ break
636
+ path = parent
637
+ return result
638
+
639
+ def commit(self):
640
+ """
641
+ Commit recorded changes, turn off recording, return
642
+ changes.
643
+ """
644
+ assert self.record
645
+ result = self.files_written, self.dirs_created
646
+ self._init_record()
647
+ return result
648
+
649
+ def rollback(self):
650
+ if not self.dry_run:
651
+ for f in list(self.files_written):
652
+ if os.path.exists(f):
653
+ os.remove(f)
654
+ # dirs should all be empty now, except perhaps for
655
+ # __pycache__ subdirs
656
+ # reverse so that subdirs appear before their parents
657
+ dirs = sorted(self.dirs_created, reverse=True)
658
+ for d in dirs:
659
+ flist = os.listdir(d)
660
+ if flist:
661
+ assert flist == ['__pycache__']
662
+ sd = os.path.join(d, flist[0])
663
+ os.rmdir(sd)
664
+ os.rmdir(d) # should fail if non-empty
665
+ self._init_record()
666
+
667
+ def resolve(module_name, dotted_path):
668
+ if module_name in sys.modules:
669
+ mod = sys.modules[module_name]
670
+ else:
671
+ mod = __import__(module_name)
672
+ if dotted_path is None:
673
+ result = mod
674
+ else:
675
+ parts = dotted_path.split('.')
676
+ result = getattr(mod, parts.pop(0))
677
+ for p in parts:
678
+ result = getattr(result, p)
679
+ return result
680
+
681
+
682
+ class ExportEntry(object):
683
+ def __init__(self, name, prefix, suffix, flags):
684
+ self.name = name
685
+ self.prefix = prefix
686
+ self.suffix = suffix
687
+ self.flags = flags
688
+
689
+ @cached_property
690
+ def value(self):
691
+ return resolve(self.prefix, self.suffix)
692
+
693
+ def __repr__(self): # pragma: no cover
694
+ return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
695
+ self.suffix, self.flags)
696
+
697
+ def __eq__(self, other):
698
+ if not isinstance(other, ExportEntry):
699
+ result = False
700
+ else:
701
+ result = (self.name == other.name and
702
+ self.prefix == other.prefix and
703
+ self.suffix == other.suffix and
704
+ self.flags == other.flags)
705
+ return result
706
+
707
+ __hash__ = object.__hash__
708
+
709
+
710
+ ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
711
+ \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
712
+ \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
713
+ ''', re.VERBOSE)
714
+
715
+ def get_export_entry(specification):
716
+ m = ENTRY_RE.search(specification)
717
+ if not m:
718
+ result = None
719
+ if '[' in specification or ']' in specification:
720
+ raise DistlibException("Invalid specification "
721
+ "'%s'" % specification)
722
+ else:
723
+ d = m.groupdict()
724
+ name = d['name']
725
+ path = d['callable']
726
+ colons = path.count(':')
727
+ if colons == 0:
728
+ prefix, suffix = path, None
729
+ else:
730
+ if colons != 1:
731
+ raise DistlibException("Invalid specification "
732
+ "'%s'" % specification)
733
+ prefix, suffix = path.split(':')
734
+ flags = d['flags']
735
+ if flags is None:
736
+ if '[' in specification or ']' in specification:
737
+ raise DistlibException("Invalid specification "
738
+ "'%s'" % specification)
739
+ flags = []
740
+ else:
741
+ flags = [f.strip() for f in flags.split(',')]
742
+ result = ExportEntry(name, prefix, suffix, flags)
743
+ return result
744
+
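+ # Example (illustrative; names hypothetical):
+ #
+ # e = get_export_entry('hello = mypkg.cli:main [flag1, flag2]')
+ # (e.name, e.prefix, e.suffix, e.flags)
+ #     -> ('hello', 'mypkg.cli', 'main', ['flag1', 'flag2'])
+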
745
+
746
+ def get_cache_base(suffix=None):
747
+ """
748
+ Return the default base location for distlib caches. If the directory does
749
+ not exist, it is created. Use the suffix provided for the base directory,
750
+ and default to '.distlib' if it isn't provided.
751
+
752
+ On Windows, if LOCALAPPDATA is defined in the environment, then it is
753
+ assumed to be a directory, and will be the parent directory of the result.
754
+ On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
755
+ directory - using os.expanduser('~') - will be the parent directory of
756
+ the result.
757
+
758
+ The result is just the directory '.distlib' in the parent directory as
759
+ determined above, or with the name specified with ``suffix``.
760
+ """
761
+ if suffix is None:
762
+ suffix = '.distlib'
763
+ if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
764
+ result = os.path.expandvars('$localappdata')
765
+ else:
766
+ # Assume posix, or old Windows
767
+ result = os.path.expanduser('~')
768
+ # we use 'isdir' instead of 'exists', because we want to
769
+ # fail if there's a file with that name
770
+ if os.path.isdir(result):
771
+ usable = os.access(result, os.W_OK)
772
+ if not usable:
773
+ logger.warning('Directory exists but is not writable: %s', result)
774
+ else:
775
+ try:
776
+ os.makedirs(result)
777
+ usable = True
778
+ except OSError:
779
+ logger.warning('Unable to create %s', result, exc_info=True)
780
+ usable = False
781
+ if not usable:
782
+ result = tempfile.mkdtemp()
783
+ logger.warning('Default location unusable, using %s', result)
784
+ return os.path.join(result, suffix)
785
+
786
+
787
+ def path_to_cache_dir(path):
788
+ """
789
+ Convert an absolute path to a directory name for use in a cache.
790
+
791
+ The algorithm used is:
792
+
793
+ #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
794
+ #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
795
+ #. ``'.cache'`` is appended.
796
+ """
797
+ d, p = os.path.splitdrive(os.path.abspath(path))
798
+ if d:
799
+ d = d.replace(':', '---')
800
+ p = p.replace(os.sep, '--')
801
+ return d + p + '.cache'
802
+
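+ # Example (illustrative, POSIX): path_to_cache_dir('/usr/lib/libfoo.so')
+ #     -> '--usr--lib--libfoo.so.cache'
+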
803
+
804
+ def ensure_slash(s):
805
+ if not s.endswith('/'):
806
+ return s + '/'
807
+ return s
808
+
809
+
810
+ def parse_credentials(netloc):
811
+ username = password = None
812
+ if '@' in netloc:
813
+ prefix, netloc = netloc.rsplit('@', 1)
814
+ if ':' not in prefix:
815
+ username = prefix
816
+ else:
817
+ username, password = prefix.split(':', 1)
818
+ if username:
819
+ username = unquote(username)
820
+ if password:
821
+ password = unquote(password)
822
+ return username, password, netloc
823
+
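+ # Example (illustrative; host name hypothetical):
+ #
+ # parse_credentials('user:p%40ss@pypi.example.com')
+ #     -> ('user', 'p@ss', 'pypi.example.com')
+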
824
+
825
+ def get_process_umask():
826
+ result = os.umask(0o22)
827
+ os.umask(result)
828
+ return result
829
+
830
+ def is_string_sequence(seq):
831
+ result = True
832
+ i = None
833
+ for i, s in enumerate(seq):
834
+ if not isinstance(s, string_types):
835
+ result = False
836
+ break
837
+ assert i is not None
838
+ return result
839
+
840
+ PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
841
+ '([a-z0-9_.+-]+)', re.I)
842
+ PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
843
+
844
+
845
+ def split_filename(filename, project_name=None):
846
+ """
847
+ Extract name, version and python version from a filename (no extension).
848
+
849
+ Return a (name, version, pyver) tuple, or None if no match is found.
850
+ """
851
+ result = None
852
+ pyver = None
853
+ filename = unquote(filename).replace(' ', '-')
854
+ m = PYTHON_VERSION.search(filename)
855
+ if m:
856
+ pyver = m.group(1)
857
+ filename = filename[:m.start()]
858
+ if project_name and len(filename) > len(project_name) + 1:
859
+ m = re.match(re.escape(project_name) + r'\b', filename)
860
+ if m:
861
+ n = m.end()
862
+ result = filename[:n], filename[n + 1:], pyver
863
+ if result is None:
864
+ m = PROJECT_NAME_AND_VERSION.match(filename)
865
+ if m:
866
+ result = m.group(1), m.group(3), pyver
867
+ return result
868
+
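+ # Example (illustrative): split_filename('foo-1.0-py3.9')
+ #     -> ('foo', '1.0', '3.9')
+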
869
+ # Allow spaces in name because of legacy dists like "Twisted Core"
870
+ NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
871
+ r'\(\s*(?P<ver>[^\s)]+)\)$')
872
+
873
+ def parse_name_and_version(p):
874
+ """
875
+ A utility method used to get name and version from a string.
876
+
877
+ The string typically comes from e.g. a Provides-Dist value.
878
+
879
+ :param p: A value in a form 'foo (1.0)'
880
+ :return: The name and version as a tuple.
881
+ """
882
+ m = NAME_VERSION_RE.match(p)
883
+ if not m:
884
+ raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
885
+ d = m.groupdict()
886
+ return d['name'].strip().lower(), d['ver']
887
+
888
+ def get_extras(requested, available):
889
+ result = set()
890
+ requested = set(requested or [])
891
+ available = set(available or [])
892
+ if '*' in requested:
893
+ requested.remove('*')
894
+ result |= available
895
+ for r in requested:
896
+ if r == '-':
897
+ result.add(r)
898
+ elif r.startswith('-'):
899
+ unwanted = r[1:]
900
+ if unwanted not in available:
901
+ logger.warning('undeclared extra: %s' % unwanted)
902
+ if unwanted in result:
903
+ result.remove(unwanted)
904
+ else:
905
+ if r not in available:
906
+ logger.warning('undeclared extra: %s' % r)
907
+ result.add(r)
908
+ return result
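+
+ # Example (illustrative): get_extras(['*', '-dev'], ['dev', 'docs'])
+ #     -> {'docs'}   ('*' selects all available, '-dev' removes one)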
909
+ #
910
+ # Extended metadata functionality
911
+ #
912
+
913
+ def _get_external_data(url):
914
+ result = {}
915
+ try:
916
+ # urlopen might fail if it runs into redirections,
917
+ # because of Python issue #13696. Fixed in locators
918
+ # using a custom redirect handler.
919
+ resp = urlopen(url)
920
+ headers = resp.info()
921
+ ct = headers.get('Content-Type')
922
+ if not ct.startswith('application/json'):
923
+ logger.debug('Unexpected response for JSON request: %s', ct)
924
+ else:
925
+ reader = codecs.getreader('utf-8')(resp)
926
+ #data = reader.read().decode('utf-8')
927
+ #result = json.loads(data)
928
+ result = json.load(reader)
929
+ except Exception as e:
930
+ logger.exception('Failed to get external data for %s: %s', url, e)
931
+ return result
932
+
933
+ _external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
934
+
935
+ def get_project_data(name):
936
+ url = '%s/%s/project.json' % (name[0].upper(), name)
937
+ url = urljoin(_external_data_base_url, url)
938
+ result = _get_external_data(url)
939
+ return result
940
+
941
+ def get_package_data(name, version):
942
+ url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
943
+ url = urljoin(_external_data_base_url, url)
944
+ return _get_external_data(url)
945
+
946
+
947
+ class Cache(object):
948
+ """
949
+ A class implementing a cache for resources that need to live in the file system,
950
+ e.g. shared libraries. This class was moved here from resources because it
951
+ could be used by other modules, e.g. the wheel module.

952
+ """
953
+
954
+ def __init__(self, base):
955
+ """
956
+ Initialise an instance.
957
+
958
+ :param base: The base directory where the cache should be located.
959
+ """
960
+ # we use 'isdir' instead of 'exists', because we want to
961
+ # fail if there's a file with that name
962
+ if not os.path.isdir(base): # pragma: no cover
963
+ os.makedirs(base)
964
+ if (os.stat(base).st_mode & 0o77) != 0:
965
+ logger.warning('Directory \'%s\' is not private', base)
966
+ self.base = os.path.abspath(os.path.normpath(base))
967
+
968
+ def prefix_to_dir(self, prefix):
969
+ """
970
+ Converts a resource prefix to a directory name in the cache.
971
+ """
972
+ return path_to_cache_dir(prefix)
973
+
974
+ def clear(self):
975
+ """
976
+ Clear the cache.
977
+ """
978
+ not_removed = []
979
+ for fn in os.listdir(self.base):
980
+ fn = os.path.join(self.base, fn)
981
+ try:
982
+ if os.path.islink(fn) or os.path.isfile(fn):
983
+ os.remove(fn)
984
+ elif os.path.isdir(fn):
985
+ shutil.rmtree(fn)
986
+ except Exception:
987
+ not_removed.append(fn)
988
+ return not_removed
989
+
990
+
991
+ class EventMixin(object):
992
+ """
993
+ A very simple publish/subscribe system.
994
+ """
995
+ def __init__(self):
996
+ self._subscribers = {}
997
+
998
+ def add(self, event, subscriber, append=True):
999
+ """
1000
+ Add a subscriber for an event.
1001
+
1002
+ :param event: The name of an event.
1003
+ :param subscriber: The subscriber to be added (and called when the
1004
+ event is published).
1005
+ :param append: Whether to append or prepend the subscriber to an
1006
+ existing subscriber list for the event.
1007
+ """
1008
+ subs = self._subscribers
1009
+ if event not in subs:
1010
+ subs[event] = deque([subscriber])
1011
+ else:
1012
+ sq = subs[event]
1013
+ if append:
1014
+ sq.append(subscriber)
1015
+ else:
1016
+ sq.appendleft(subscriber)
1017
+
1018
+ def remove(self, event, subscriber):
1019
+ """
1020
+ Remove a subscriber for an event.
1021
+
1022
+ :param event: The name of an event.
1023
+ :param subscriber: The subscriber to be removed.
1024
+ """
1025
+ subs = self._subscribers
1026
+ if event not in subs:
1027
+ raise ValueError('No subscribers: %r' % event)
1028
+ subs[event].remove(subscriber)
1029
+
1030
+ def get_subscribers(self, event):
1031
+ """
1032
+ Return an iterator for the subscribers for an event.
1033
+ :param event: The event to return subscribers for.
1034
+ """
1035
+ return iter(self._subscribers.get(event, ()))
1036
+
1037
+ def publish(self, event, *args, **kwargs):
1038
+ """
1039
+ Publish an event and return a list of values returned by its
1040
+ subscribers.
1041
+
1042
+ :param event: The event to publish.
1043
+ :param args: The positional arguments to pass to the event's
1044
+ subscribers.
1045
+ :param kwargs: The keyword arguments to pass to the event's
1046
+ subscribers.
1047
+ """
1048
+ result = []
1049
+ for subscriber in self.get_subscribers(event):
1050
+ try:
1051
+ value = subscriber(event, *args, **kwargs)
1052
+ except Exception:
1053
+ logger.exception('Exception during event publication')
1054
+ value = None
1055
+ result.append(value)
1056
+ logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
1057
+ event, args, kwargs, result)
1058
+ return result
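A minimal publish/subscribe sketch using EventMixin as defined above; the event name and subscriber are hypothetical:

    >>> em = EventMixin()
    >>> em.add('built', lambda event, name: 'built %s' % name)
    >>> em.publish('built', 'dist')
    ['built dist']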
1059
+
1060
+ #
1061
+ # Simple sequencing
1062
+ #
1063
+ class Sequencer(object):
1064
+ def __init__(self):
1065
+ self._preds = {}
1066
+ self._succs = {}
1067
+ self._nodes = set() # nodes with no preds/succs
1068
+
1069
+ def add_node(self, node):
1070
+ self._nodes.add(node)
1071
+
1072
+ def remove_node(self, node, edges=False):
1073
+ if node in self._nodes:
1074
+ self._nodes.remove(node)
1075
+ if edges:
1076
+ for p in set(self._preds.get(node, ())):
1077
+ self.remove(p, node)
1078
+ for s in set(self._succs.get(node, ())):
1079
+ self.remove(node, s)
1080
+ # Remove empties
1081
+ for k, v in list(self._preds.items()):
1082
+ if not v:
1083
+ del self._preds[k]
1084
+ for k, v in list(self._succs.items()):
1085
+ if not v:
1086
+ del self._succs[k]
1087
+
1088
+ def add(self, pred, succ):
1089
+ assert pred != succ
1090
+ self._preds.setdefault(succ, set()).add(pred)
1091
+ self._succs.setdefault(pred, set()).add(succ)
1092
+
1093
+ def remove(self, pred, succ):
1094
+ assert pred != succ
1095
+ try:
1096
+ preds = self._preds[succ]
1097
+ succs = self._succs[pred]
1098
+ except KeyError: # pragma: no cover
1099
+ raise ValueError('%r not a successor of anything' % succ)
1100
+ try:
1101
+ preds.remove(pred)
1102
+ succs.remove(succ)
1103
+ except KeyError: # pragma: no cover
1104
+ raise ValueError('%r not a successor of %r' % (succ, pred))
1105
+
1106
+ def is_step(self, step):
1107
+ return (step in self._preds or step in self._succs or
1108
+ step in self._nodes)
1109
+
1110
+ def get_steps(self, final):
1111
+ if not self.is_step(final):
1112
+ raise ValueError('Unknown: %r' % final)
1113
+ result = []
1114
+ todo = []
1115
+ seen = set()
1116
+ todo.append(final)
1117
+ while todo:
1118
+ step = todo.pop(0)
1119
+ if step in seen:
1120
+ # if a step was already seen,
1121
+ # move it to the end (so it will appear earlier
1122
+ # when reversed on return) ... but not for the
1123
+ # final step, as that would be confusing for
1124
+ # users
1125
+ if step != final:
1126
+ result.remove(step)
1127
+ result.append(step)
1128
+ else:
1129
+ seen.add(step)
1130
+ result.append(step)
1131
+ preds = self._preds.get(step, ())
1132
+ todo.extend(preds)
1133
+ return reversed(result)
1134
+
1135
+ @property
1136
+ def strong_connections(self):
1137
+ #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
1138
+ index_counter = [0]
1139
+ stack = []
1140
+ lowlinks = {}
1141
+ index = {}
1142
+ result = []
1143
+
1144
+ graph = self._succs
1145
+
1146
+ def strongconnect(node):
1147
+ # set the depth index for this node to the smallest unused index
1148
+ index[node] = index_counter[0]
1149
+ lowlinks[node] = index_counter[0]
1150
+ index_counter[0] += 1
1151
+ stack.append(node)
1152
+
1153
+ # Consider successors
1154
+ try:
1155
+ successors = graph[node]
1156
+ except Exception:
1157
+ successors = []
1158
+ for successor in successors:
1159
+ if successor not in lowlinks:
1160
+ # Successor has not yet been visited
1161
+ strongconnect(successor)
1162
+ lowlinks[node] = min(lowlinks[node],lowlinks[successor])
1163
+ elif successor in stack:
1164
+ # the successor is in the stack and hence in the current
1165
+ # strongly connected component (SCC)
1166
+ lowlinks[node] = min(lowlinks[node],index[successor])
1167
+
1168
+ # If `node` is a root node, pop the stack and generate an SCC
1169
+ if lowlinks[node] == index[node]:
1170
+ connected_component = []
1171
+
1172
+ while True:
1173
+ successor = stack.pop()
1174
+ connected_component.append(successor)
1175
+ if successor == node: break
1176
+ component = tuple(connected_component)
1177
+ # storing the result
1178
+ result.append(component)
1179
+
1180
+ for node in graph:
1181
+ if node not in lowlinks:
1182
+ strongconnect(node)
1183
+
1184
+ return result
1185
+
1186
+ @property
1187
+ def dot(self):
1188
+ result = ['digraph G {']
1189
+ for succ in self._preds:
1190
+ preds = self._preds[succ]
1191
+ for pred in preds:
1192
+ result.append(' %s -> %s;' % (pred, succ))
1193
+ for node in self._nodes:
1194
+ result.append(' %s;' % node)
1195
+ result.append('}')
1196
+ return '\n'.join(result)
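A small sketch of the Sequencer API: add() records a predecessor/successor edge, and get_steps() returns the steps leading to a final step in dependency order. The step names are hypothetical:

    >>> seq = Sequencer()
    >>> seq.add('build', 'test')
    >>> seq.add('test', 'release')
    >>> list(seq.get_steps('release'))
    ['build', 'test', 'release']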
1197
+
1198
+ #
1199
+ # Unarchiving functionality for zip, tar, tgz, tbz, whl
1200
+ #
1201
+
1202
+ ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
1203
+ '.tgz', '.tbz', '.whl')
1204
+
1205
+ def unarchive(archive_filename, dest_dir, format=None, check=True):
1206
+
1207
+ def check_path(path):
1208
+ if not isinstance(path, text_type):
1209
+ path = path.decode('utf-8')
1210
+ p = os.path.abspath(os.path.join(dest_dir, path))
1211
+ if not p.startswith(dest_dir) or p[plen] != os.sep:
1212
+ raise ValueError('path outside destination: %r' % p)
1213
+
1214
+ dest_dir = os.path.abspath(dest_dir)
1215
+ plen = len(dest_dir)
1216
+ archive = None
1217
+ if format is None:
1218
+ if archive_filename.endswith(('.zip', '.whl')):
1219
+ format = 'zip'
1220
+ elif archive_filename.endswith(('.tar.gz', '.tgz')):
1221
+ format = 'tgz'
1222
+ mode = 'r:gz'
1223
+ elif archive_filename.endswith(('.tar.bz2', '.tbz')):
1224
+ format = 'tbz'
1225
+ mode = 'r:bz2'
1226
+ elif archive_filename.endswith('.tar'):
1227
+ format = 'tar'
1228
+ mode = 'r'
1229
+ else: # pragma: no cover
1230
+ raise ValueError('Unknown format for %r' % archive_filename)
1231
+ try:
1232
+ if format == 'zip':
1233
+ archive = ZipFile(archive_filename, 'r')
1234
+ if check:
1235
+ names = archive.namelist()
1236
+ for name in names:
1237
+ check_path(name)
1238
+ else:
1239
+ archive = tarfile.open(archive_filename, mode)
1240
+ if check:
1241
+ names = archive.getnames()
1242
+ for name in names:
1243
+ check_path(name)
1244
+ if format != 'zip' and sys.version_info[0] < 3:
1245
+ # See Python issue 17153. If the dest path contains Unicode,
1246
+ # tarfile extraction fails on Python 2.x if a member path name
1247
+ # contains non-ASCII characters - it leads to an implicit
1248
+ # bytes -> unicode conversion using ASCII to decode.
1249
+ for tarinfo in archive.getmembers():
1250
+ if not isinstance(tarinfo.name, text_type):
1251
+ tarinfo.name = tarinfo.name.decode('utf-8')
1252
+ archive.extractall(dest_dir)
1253
+
1254
+ finally:
1255
+ if archive:
1256
+ archive.close()
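A hedged usage sketch for unarchive; both paths are hypothetical. The format is inferred from the filename suffix unless given explicitly, and check=True runs check_path over every member to reject archive entries that would escape dest_dir:

    unarchive('/tmp/downloads/foo-1.0.tar.gz', '/tmp/foo-src')  # format 'tgz' inferred
    unarchive('/tmp/downloads/foo-1.0-py3-none-any.whl', '/tmp/foo-whl', format='zip')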
1257
+
1258
+
1259
+ def zip_dir(directory):
1260
+ """zip a directory tree into a BytesIO object"""
1261
+ result = io.BytesIO()
1262
+ dlen = len(directory)
1263
+ with ZipFile(result, "w") as zf:
1264
+ for root, dirs, files in os.walk(directory):
1265
+ for name in files:
1266
+ full = os.path.join(root, name)
1267
+ rel = root[dlen:]
1268
+ dest = os.path.join(rel, name)
1269
+ zf.write(full, dest)
1270
+ return result
1271
+
1272
+ #
1273
+ # Simple progress bar
1274
+ #
1275
+
1276
+ UNITS = ('', 'K', 'M', 'G','T','P')
1277
+
1278
+
1279
+ class Progress(object):
1280
+ unknown = 'UNKNOWN'
1281
+
1282
+ def __init__(self, minval=0, maxval=100):
1283
+ assert maxval is None or maxval >= minval
1284
+ self.min = self.cur = minval
1285
+ self.max = maxval
1286
+ self.started = None
1287
+ self.elapsed = 0
1288
+ self.done = False
1289
+
1290
+ def update(self, curval):
1291
+ assert self.min <= curval
1292
+ assert self.max is None or curval <= self.max
1293
+ self.cur = curval
1294
+ now = time.time()
1295
+ if self.started is None:
1296
+ self.started = now
1297
+ else:
1298
+ self.elapsed = now - self.started
1299
+
1300
+ def increment(self, incr):
1301
+ assert incr >= 0
1302
+ self.update(self.cur + incr)
1303
+
1304
+ def start(self):
1305
+ self.update(self.min)
1306
+ return self
1307
+
1308
+ def stop(self):
1309
+ if self.max is not None:
1310
+ self.update(self.max)
1311
+ self.done = True
1312
+
1313
+ @property
1314
+ def maximum(self):
1315
+ return self.unknown if self.max is None else self.max
1316
+
1317
+ @property
1318
+ def percentage(self):
1319
+ if self.done:
1320
+ result = '100 %'
1321
+ elif self.max is None:
1322
+ result = ' ?? %'
1323
+ else:
1324
+ v = 100.0 * (self.cur - self.min) / (self.max - self.min)
1325
+ result = '%3d %%' % v
1326
+ return result
1327
+
1328
+ def format_duration(self, duration):
1329
+ if (duration <= 0) and self.max is None or self.cur == self.min:
1330
+ result = '??:??:??'
1331
+ #elif duration < 1:
1332
+ # result = '--:--:--'
1333
+ else:
1334
+ result = time.strftime('%H:%M:%S', time.gmtime(duration))
1335
+ return result
1336
+
1337
+ @property
1338
+ def ETA(self):
1339
+ if self.done:
1340
+ prefix = 'Done'
1341
+ t = self.elapsed
1342
+ #import pdb; pdb.set_trace()
1343
+ else:
1344
+ prefix = 'ETA '
1345
+ if self.max is None:
1346
+ t = -1
1347
+ elif self.elapsed == 0 or (self.cur == self.min):
1348
+ t = 0
1349
+ else:
1350
+ #import pdb; pdb.set_trace()
1351
+ t = float(self.max - self.min)
1352
+ t /= self.cur - self.min
1353
+ t = (t - 1) * self.elapsed
1354
+ return '%s: %s' % (prefix, self.format_duration(t))
1355
+
1356
+ @property
1357
+ def speed(self):
1358
+ if self.elapsed == 0:
1359
+ result = 0.0
1360
+ else:
1361
+ result = (self.cur - self.min) / self.elapsed
1362
+ for unit in UNITS:
1363
+ if result < 1000:
1364
+ break
1365
+ result /= 1000.0
1366
+ return '%d %sB/s' % (result, unit)
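A sketch of driving Progress by hand; the values are arbitrary:

    >>> p = Progress(maxval=1000).start()
    >>> p.increment(250)
    >>> p.percentage
    ' 25 %'
    >>> p.stop()
    >>> p.percentage
    '100 %'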
1367
+
1368
+ #
1369
+ # Glob functionality
1370
+ #
1371
+
1372
+ RICH_GLOB = re.compile(r'\{([^}]*)\}')
1373
+ _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
1374
+ _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
1375
+
1376
+
1377
+ def iglob(path_glob):
1378
+ """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
1379
+ if _CHECK_RECURSIVE_GLOB.search(path_glob):
1380
+ msg = """invalid glob %r: recursive glob "**" must be used alone"""
1381
+ raise ValueError(msg % path_glob)
1382
+ if _CHECK_MISMATCH_SET.search(path_glob):
1383
+ msg = """invalid glob %r: mismatching set marker '{' or '}'"""
1384
+ raise ValueError(msg % path_glob)
1385
+ return _iglob(path_glob)
1386
+
1387
+
1388
+ def _iglob(path_glob):
1389
+ rich_path_glob = RICH_GLOB.split(path_glob, 1)
1390
+ if len(rich_path_glob) > 1:
1391
+ assert len(rich_path_glob) == 3, rich_path_glob
1392
+ prefix, set, suffix = rich_path_glob
1393
+ for item in set.split(','):
1394
+ for path in _iglob(''.join((prefix, item, suffix))):
1395
+ yield path
1396
+ else:
1397
+ if '**' not in path_glob:
1398
+ for item in std_iglob(path_glob):
1399
+ yield item
1400
+ else:
1401
+ prefix, radical = path_glob.split('**', 1)
1402
+ if prefix == '':
1403
+ prefix = '.'
1404
+ if radical == '':
1405
+ radical = '*'
1406
+ else:
1407
+ # we support both '/' and '\' as separators
1408
+ radical = radical.lstrip('/')
1409
+ radical = radical.lstrip('\\')
1410
+ for path, dir, files in os.walk(prefix):
1411
+ path = os.path.normpath(path)
1412
+ for fn in _iglob(os.path.join(path, radical)):
1413
+ yield fn
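An illustrative sketch of iglob, assuming a hypothetical tree containing src/a.py and src/sub/b.py:

    >>> sorted(iglob('src/{a,nope}.py'))   # set expansion
    ['src/a.py']
    >>> sorted(iglob('src/**/*.py'))       # recursive walk
    ['src/a.py', 'src/sub/b.py']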
1414
+
1415
+ if ssl:
1416
+ from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
1417
+ CertificateError)
1418
+
1419
+
1420
+ #
1421
+ # HTTPSConnection which verifies certificates/matches domains
1422
+ #
1423
+
1424
+ class HTTPSConnection(httplib.HTTPSConnection):
1425
+ ca_certs = None # set this to the path to the certs file (.pem)
1426
+ check_domain = True # only used if ca_certs is not None
1427
+
1428
+ # noinspection PyPropertyAccess
1429
+ def connect(self):
1430
+ sock = socket.create_connection((self.host, self.port), self.timeout)
1431
+ if getattr(self, '_tunnel_host', False):
1432
+ self.sock = sock
1433
+ self._tunnel()
1434
+
1435
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
1436
+ if hasattr(ssl, 'OP_NO_SSLv2'):
1437
+ context.options |= ssl.OP_NO_SSLv2
1438
+ if self.cert_file:
1439
+ context.load_cert_chain(self.cert_file, self.key_file)
1440
+ kwargs = {}
1441
+ if self.ca_certs:
1442
+ context.verify_mode = ssl.CERT_REQUIRED
1443
+ context.load_verify_locations(cafile=self.ca_certs)
1444
+ if getattr(ssl, 'HAS_SNI', False):
1445
+ kwargs['server_hostname'] = self.host
1446
+
1447
+ self.sock = context.wrap_socket(sock, **kwargs)
1448
+ if self.ca_certs and self.check_domain:
1449
+ try:
1450
+ match_hostname(self.sock.getpeercert(), self.host)
1451
+ logger.debug('Host verified: %s', self.host)
1452
+ except CertificateError: # pragma: no cover
1453
+ self.sock.shutdown(socket.SHUT_RDWR)
1454
+ self.sock.close()
1455
+ raise
1456
+
1457
+ class HTTPSHandler(BaseHTTPSHandler):
1458
+ def __init__(self, ca_certs, check_domain=True):
1459
+ BaseHTTPSHandler.__init__(self)
1460
+ self.ca_certs = ca_certs
1461
+ self.check_domain = check_domain
1462
+
1463
+ def _conn_maker(self, *args, **kwargs):
1464
+ """
1465
+ This is called to create a connection instance. Normally you'd
1466
+ pass a connection class to do_open, but it doesn't actually check for
1467
+ a class, and just expects a callable. As long as we behave just as a
1468
+ constructor would have, we should be OK. If it ever changes so that
1469
+ we *must* pass a class, we'll create an UnsafeHTTPSConnection class
1470
+ which just sets check_domain to False in the class definition, and
1471
+ choose which one to pass to do_open.
1472
+ """
1473
+ result = HTTPSConnection(*args, **kwargs)
1474
+ if self.ca_certs:
1475
+ result.ca_certs = self.ca_certs
1476
+ result.check_domain = self.check_domain
1477
+ return result
1478
+
1479
+ def https_open(self, req):
1480
+ try:
1481
+ return self.do_open(self._conn_maker, req)
1482
+ except URLError as e:
1483
+ if 'certificate verify failed' in str(e.reason):
1484
+ raise CertificateError('Unable to verify server certificate '
1485
+ 'for %s' % req.host)
1486
+ else:
1487
+ raise
1488
+
1489
+ #
1490
+ # To guard against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
1491
+ # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
1492
+ # HTML containing a http://xyz link when it should be https://xyz),
1493
+ # you can use the following handler class, which does not allow HTTP traffic.
1494
+ #
1495
+ # It works by inheriting from HTTPHandler - so build_opener won't add a
1496
+ # handler for HTTP itself.
1497
+ #
1498
+ class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
1499
+ def http_open(self, req):
1500
+ raise URLError('Unexpected HTTP request on what should be a secure '
1501
+ 'connection: %s' % req)
1502
+
1503
+ #
1504
+ # XML-RPC with timeouts
1505
+ #
1506
+ class Transport(xmlrpclib.Transport):
1507
+ def __init__(self, timeout, use_datetime=0):
1508
+ self.timeout = timeout
1509
+ xmlrpclib.Transport.__init__(self, use_datetime)
1510
+
1511
+ def make_connection(self, host):
1512
+ h, eh, x509 = self.get_host_info(host)
1513
+ if not self._connection or host != self._connection[0]:
1514
+ self._extra_headers = eh
1515
+ self._connection = host, httplib.HTTPConnection(h)
1516
+ return self._connection[1]
1517
+
1518
+ if ssl:
1519
+ class SafeTransport(xmlrpclib.SafeTransport):
1520
+ def __init__(self, timeout, use_datetime=0):
1521
+ self.timeout = timeout
1522
+ xmlrpclib.SafeTransport.__init__(self, use_datetime)
1523
+
1524
+ def make_connection(self, host):
1525
+ h, eh, kwargs = self.get_host_info(host)
1526
+ if not kwargs:
1527
+ kwargs = {}
1528
+ kwargs['timeout'] = self.timeout
1529
+ if not self._connection or host != self._connection[0]:
1530
+ self._extra_headers = eh
1531
+ self._connection = host, httplib.HTTPSConnection(h, None,
1532
+ **kwargs)
1533
+ return self._connection[1]
1534
+
1535
+
1536
+ class ServerProxy(xmlrpclib.ServerProxy):
1537
+ def __init__(self, uri, **kwargs):
1538
+ self.timeout = timeout = kwargs.pop('timeout', None)
1539
+ # The above classes only come into play if a timeout
1540
+ # is specified
1541
+ if timeout is not None:
1542
+ # scheme = splittype(uri) # deprecated as of Python 3.8
1543
+ scheme = urlparse(uri)[0]
1544
+ use_datetime = kwargs.get('use_datetime', 0)
1545
+ if scheme == 'https':
1546
+ tcls = SafeTransport
1547
+ else:
1548
+ tcls = Transport
1549
+ kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
1550
+ self.transport = t
1551
+ xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
1552
+
1553
+ #
1554
+ # CSV functionality. This is provided because on 2.x, the csv module can't
1555
+ # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
1556
+ #
1557
+
1558
+ def _csv_open(fn, mode, **kwargs):
1559
+ if sys.version_info[0] < 3:
1560
+ mode += 'b'
1561
+ else:
1562
+ kwargs['newline'] = ''
1563
+ # Python 3 determines encoding from locale. Force 'utf-8'
1564
+ # file encoding to match other forced utf-8 encoding
1565
+ kwargs['encoding'] = 'utf-8'
1566
+ return open(fn, mode, **kwargs)
1567
+
1568
+
1569
+ class CSVBase(object):
1570
+ defaults = {
1571
+ 'delimiter': str(','), # The strs are used because we need native
1572
+ 'quotechar': str('"'), # str in the csv API (2.x won't take
1573
+ 'lineterminator': str('\n') # Unicode)
1574
+ }
1575
+
1576
+ def __enter__(self):
1577
+ return self
1578
+
1579
+ def __exit__(self, *exc_info):
1580
+ self.stream.close()
1581
+
1582
+
1583
+ class CSVReader(CSVBase):
1584
+ def __init__(self, **kwargs):
1585
+ if 'stream' in kwargs:
1586
+ stream = kwargs['stream']
1587
+ if sys.version_info[0] >= 3:
1588
+ # needs to be a text stream
1589
+ stream = codecs.getreader('utf-8')(stream)
1590
+ self.stream = stream
1591
+ else:
1592
+ self.stream = _csv_open(kwargs['path'], 'r')
1593
+ self.reader = csv.reader(self.stream, **self.defaults)
1594
+
1595
+ def __iter__(self):
1596
+ return self
1597
+
1598
+ def next(self):
1599
+ result = next(self.reader)
1600
+ if sys.version_info[0] < 3:
1601
+ for i, item in enumerate(result):
1602
+ if not isinstance(item, text_type):
1603
+ result[i] = item.decode('utf-8')
1604
+ return result
1605
+
1606
+ __next__ = next
1607
+
1608
+ class CSVWriter(CSVBase):
1609
+ def __init__(self, fn, **kwargs):
1610
+ self.stream = _csv_open(fn, 'w')
1611
+ self.writer = csv.writer(self.stream, **self.defaults)
1612
+
1613
+ def writerow(self, row):
1614
+ if sys.version_info[0] < 3:
1615
+ r = []
1616
+ for item in row:
1617
+ if isinstance(item, text_type):
1618
+ item = item.encode('utf-8')
1619
+ r.append(item)
1620
+ row = r
1621
+ self.writer.writerow(row)
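A sketch of round-tripping a RECORD-style file through CSVWriter and CSVReader; the filename and row contents are hypothetical:

    with CSVWriter('RECORD') as writer:      # hypothetical filename
        writer.writerow(['pkg/__init__.py', 'sha256=abc123', '42'])
    with CSVReader(path='RECORD') as reader:
        rows = list(reader)                  # [['pkg/__init__.py', 'sha256=abc123', '42']]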
1622
+
1623
+ #
1624
+ # Configurator functionality
1625
+ #
1626
+
1627
+ class Configurator(BaseConfigurator):
1628
+
1629
+ value_converters = dict(BaseConfigurator.value_converters)
1630
+ value_converters['inc'] = 'inc_convert'
1631
+
1632
+ def __init__(self, config, base=None):
1633
+ super(Configurator, self).__init__(config)
1634
+ self.base = base or os.getcwd()
1635
+
1636
+ def configure_custom(self, config):
1637
+ def convert(o):
1638
+ if isinstance(o, (list, tuple)):
1639
+ result = type(o)([convert(i) for i in o])
1640
+ elif isinstance(o, dict):
1641
+ if '()' in o:
1642
+ result = self.configure_custom(o)
1643
+ else:
1644
+ result = {}
1645
+ for k in o:
1646
+ result[k] = convert(o[k])
1647
+ else:
1648
+ result = self.convert(o)
1649
+ return result
1650
+
1651
+ c = config.pop('()')
1652
+ if not callable(c):
1653
+ c = self.resolve(c)
1654
+ props = config.pop('.', None)
1655
+ # Check for valid identifiers
1656
+ args = config.pop('[]', ())
1657
+ if args:
1658
+ args = tuple([convert(o) for o in args])
1659
+ items = [(k, convert(config[k])) for k in config if valid_ident(k)]
1660
+ kwargs = dict(items)
1661
+ result = c(*args, **kwargs)
1662
+ if props:
1663
+ for n, v in props.items():
1664
+ setattr(result, n, convert(v))
1665
+ return result
1666
+
1667
+ def __getitem__(self, key):
1668
+ result = self.config[key]
1669
+ if isinstance(result, dict) and '()' in result:
1670
+ self.config[key] = result = self.configure_custom(result)
1671
+ return result
1672
+
1673
+ def inc_convert(self, value):
1674
+ """Default converter for the inc:// protocol."""
1675
+ if not os.path.isabs(value):
1676
+ value = os.path.join(self.base, value)
1677
+ with codecs.open(value, 'r', encoding='utf-8') as f:
1678
+ result = json.load(f)
1679
+ return result
1680
+
1681
+
1682
+ class SubprocessMixin(object):
1683
+ """
1684
+ Mixin for running subprocesses and capturing their output
1685
+ """
1686
+ def __init__(self, verbose=False, progress=None):
1687
+ self.verbose = verbose
1688
+ self.progress = progress
1689
+
1690
+ def reader(self, stream, context):
1691
+ """
1692
+ Read lines from a subprocess' output stream and either pass them to a progress
1693
+ callable (if specified) or write progress information to sys.stderr.
1694
+ """
1695
+ progress = self.progress
1696
+ verbose = self.verbose
1697
+ while True:
1698
+ s = stream.readline()
1699
+ if not s:
1700
+ break
1701
+ if progress is not None:
1702
+ progress(s, context)
1703
+ else:
1704
+ if not verbose:
1705
+ sys.stderr.write('.')
1706
+ else:
1707
+ sys.stderr.write(s.decode('utf-8'))
1708
+ sys.stderr.flush()
1709
+ stream.close()
1710
+
1711
+ def run_command(self, cmd, **kwargs):
1712
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
1713
+ stderr=subprocess.PIPE, **kwargs)
1714
+ t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
1715
+ t1.start()
1716
+ t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
1717
+ t2.start()
1718
+ p.wait()
1719
+ t1.join()
1720
+ t2.join()
1721
+ if self.progress is not None:
1722
+ self.progress('done.', 'main')
1723
+ elif self.verbose:
1724
+ sys.stderr.write('done.\n')
1725
+ return p
1726
+
1727
+
1728
+ def normalize_name(name):
1729
+ """Normalize a python package name a la PEP 503"""
1730
+ # https://www.python.org/dev/peps/pep-0503/#normalized-names
1731
+ return re.sub('[-_.]+', '-', name).lower()
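Examples of normalize_name, following the PEP 503 rule implemented above (runs of '-', '_' and '.' collapse to a single '-', then lowercase):

    >>> normalize_name('Friendly-Bard')
    'friendly-bard'
    >>> normalize_name('FRIENDLY.BARD__2')
    'friendly-bard-2'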
1732
+
1733
+ # def _get_pypirc_command():
1734
+ # """
1735
+ # Get the distutils command for interacting with PyPI configurations.
1736
+ # :return: the command.
1737
+ # """
1738
+ # from distutils.core import Distribution
1739
+ # from distutils.config import PyPIRCCommand
1740
+ # d = Distribution()
1741
+ # return PyPIRCCommand(d)
1742
+
1743
+ class PyPIRCFile(object):
1744
+
1745
+ DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
1746
+ DEFAULT_REALM = 'pypi'
1747
+
1748
+ def __init__(self, fn=None, url=None):
1749
+ if fn is None:
1750
+ fn = os.path.join(os.path.expanduser('~'), '.pypirc')
1751
+ self.filename = fn
1752
+ self.url = url
1753
+
1754
+ def read(self):
1755
+ result = {}
1756
+
1757
+ if os.path.exists(self.filename):
1758
+ repository = self.url or self.DEFAULT_REPOSITORY
1759
+
1760
+ config = configparser.RawConfigParser()
1761
+ config.read(self.filename)
1762
+ sections = config.sections()
1763
+ if 'distutils' in sections:
1764
+ # let's get the list of servers
1765
+ index_servers = config.get('distutils', 'index-servers')
1766
+ _servers = [server.strip() for server in
1767
+ index_servers.split('\n')
1768
+ if server.strip() != '']
1769
+ if _servers == []:
1770
+ # nothing set, let's try to get the default pypi
1771
+ if 'pypi' in sections:
1772
+ _servers = ['pypi']
1773
+ else:
1774
+ for server in _servers:
1775
+ result = {'server': server}
1776
+ result['username'] = config.get(server, 'username')
1777
+
1778
+ # optional params
1779
+ for key, default in (('repository', self.DEFAULT_REPOSITORY),
1780
+ ('realm', self.DEFAULT_REALM),
1781
+ ('password', None)):
1782
+ if config.has_option(server, key):
1783
+ result[key] = config.get(server, key)
1784
+ else:
1785
+ result[key] = default
1786
+
1787
+ # work around people having "repository" for the "pypi"
1788
+ # section of their config set to the HTTP (rather than
1789
+ # HTTPS) URL
1790
+ if (server == 'pypi' and
1791
+ repository in (self.DEFAULT_REPOSITORY, 'pypi')):
1792
+ result['repository'] = self.DEFAULT_REPOSITORY
1793
+ elif (result['server'] != repository and
1794
+ result['repository'] != repository):
1795
+ result = {}
1796
+ elif 'server-login' in sections:
1797
+ # old format
1798
+ server = 'server-login'
1799
+ if config.has_option(server, 'repository'):
1800
+ repository = config.get(server, 'repository')
1801
+ else:
1802
+ repository = self.DEFAULT_REPOSITORY
1803
+ result = {
1804
+ 'username': config.get(server, 'username'),
1805
+ 'password': config.get(server, 'password'),
1806
+ 'repository': repository,
1807
+ 'server': server,
1808
+ 'realm': self.DEFAULT_REALM
1809
+ }
1810
+ return result
1811
+
1812
+ def update(self, username, password):
1813
+ # import pdb; pdb.set_trace()
1814
+ config = configparser.RawConfigParser()
1815
+ fn = self.filename
1816
+ config.read(fn)
1817
+ if not config.has_section('pypi'):
1818
+ config.add_section('pypi')
1819
+ config.set('pypi', 'username', username)
1820
+ config.set('pypi', 'password', password)
1821
+ with open(fn, 'w') as f:
1822
+ config.write(f)
1823
+
1824
+ def _load_pypirc(index):
1825
+ """
1826
+ Read the PyPI access configuration as supported by distutils.
1827
+ """
1828
+ return PyPIRCFile(url=index.url).read()
1829
+
1830
+ def _store_pypirc(index):
1831
+ PyPIRCFile().update(index.username, index.password)
1832
+
1833
+ #
1834
+ # get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
1835
+ # tweaks
1836
+ #
1837
+
1838
+ def get_host_platform():
1839
+ """Return a string that identifies the current platform. This is used mainly to
1840
+ distinguish platform-specific build directories and platform-specific built
1841
+ distributions. Typically includes the OS name and version and the
1842
+ architecture (as supplied by 'os.uname()'), although the exact information
1843
+ included depends on the OS; eg. on Linux, the kernel version isn't
1844
+ particularly important.
1845
+
1846
+ Examples of returned values:
1847
+ linux-i586
1848
+ linux-alpha (?)
1849
+ solaris-2.6-sun4u
1850
+
1851
+ Windows will return one of:
1852
+ win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
1853
+ win32 (all others - specifically, sys.platform is returned)
1854
+
1855
+ For other non-POSIX platforms, currently just returns 'sys.platform'.
1856
+
1857
+ """
1858
+ if os.name == 'nt':
1859
+ if 'amd64' in sys.version.lower():
1860
+ return 'win-amd64'
1861
+ if '(arm)' in sys.version.lower():
1862
+ return 'win-arm32'
1863
+ if '(arm64)' in sys.version.lower():
1864
+ return 'win-arm64'
1865
+ return sys.platform
1866
+
1867
+ # Set for cross builds explicitly
1868
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
1869
+ return os.environ["_PYTHON_HOST_PLATFORM"]
1870
+
1871
+ if os.name != 'posix' or not hasattr(os, 'uname'):
1872
+ # XXX what about the architecture? NT is Intel or Alpha,
1873
+ # Mac OS is M68k or PPC, etc.
1874
+ return sys.platform
1875
+
1876
+ # Try to distinguish various flavours of Unix
1877
+
1878
+ (osname, host, release, version, machine) = os.uname()
1879
+
1880
+ # Convert the OS name to lowercase, remove '/' characters, and translate
1881
+ # spaces (for "Power Macintosh")
1882
+ osname = osname.lower().replace('/', '')
1883
+ machine = machine.replace(' ', '_').replace('/', '-')
1884
+
1885
+ if osname[:5] == 'linux':
1886
+ # At least on Linux/Intel, 'machine' is the processor --
1887
+ # i386, etc.
1888
+ # XXX what about Alpha, SPARC, etc?
1889
+ return "%s-%s" % (osname, machine)
1890
+
1891
+ elif osname[:5] == 'sunos':
1892
+ if release[0] >= '5': # SunOS 5 == Solaris 2
1893
+ osname = 'solaris'
1894
+ release = '%d.%s' % (int(release[0]) - 3, release[2:])
1895
+ # We can't use 'platform.architecture()[0]' because of a
1896
+ # bootstrap problem. We use a dict to get an error
1897
+ # if something suspicious happens.
1898
+ bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
1899
+ machine += '.%s' % bitness[sys.maxsize]
1900
+ # fall through to standard osname-release-machine representation
1901
+ elif osname[:3] == 'aix':
1902
+ from _aix_support import aix_platform
1903
+ return aix_platform()
1904
+ elif osname[:6] == 'cygwin':
1905
+ osname = 'cygwin'
1906
+ rel_re = re.compile (r'[\d.]+', re.ASCII)
1907
+ m = rel_re.match(release)
1908
+ if m:
1909
+ release = m.group()
1910
+ elif osname[:6] == 'darwin':
1911
+ import _osx_support, distutils.sysconfig
1912
+ osname, release, machine = _osx_support.get_platform_osx(
1913
+ distutils.sysconfig.get_config_vars(),
1914
+ osname, release, machine)
1915
+
1916
+ return '%s-%s-%s' % (osname, release, machine)
1917
+
1918
+
1919
+ _TARGET_TO_PLAT = {
1920
+ 'x86' : 'win32',
1921
+ 'x64' : 'win-amd64',
1922
+ 'arm' : 'win-arm32',
1923
+ }
1924
+
1925
+
1926
+ def get_platform():
1927
+ if os.name != 'nt':
1928
+ return get_host_platform()
1929
+ cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
1930
+ if cross_compilation_target not in _TARGET_TO_PLAT:
1931
+ return get_host_platform()
1932
+ return _TARGET_TO_PLAT[cross_compilation_target]
venv/lib/python3.10/site-packages/pip/_vendor/distlib/version.py ADDED
@@ -0,0 +1,739 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # Copyright (C) 2012-2017 The Python Software Foundation.
4
+ # See LICENSE.txt and CONTRIBUTORS.txt.
5
+ #
6
+ """
7
+ Implementation of a flexible versioning scheme providing support for PEP-440,
8
+ setuptools-compatible and semantic versioning.
9
+ """
10
+
11
+ import logging
12
+ import re
13
+
14
+ from .compat import string_types
15
+ from .util import parse_requirement
16
+
17
+ __all__ = ['NormalizedVersion', 'NormalizedMatcher',
18
+ 'LegacyVersion', 'LegacyMatcher',
19
+ 'SemanticVersion', 'SemanticMatcher',
20
+ 'UnsupportedVersionError', 'get_scheme']
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ class UnsupportedVersionError(ValueError):
26
+ """This is an unsupported version."""
27
+ pass
28
+
29
+
30
+ class Version(object):
31
+ def __init__(self, s):
32
+ self._string = s = s.strip()
33
+ self._parts = parts = self.parse(s)
34
+ assert isinstance(parts, tuple)
35
+ assert len(parts) > 0
36
+
37
+ def parse(self, s):
38
+ raise NotImplementedError('please implement in a subclass')
39
+
40
+ def _check_compatible(self, other):
41
+ if type(self) != type(other):
42
+ raise TypeError('cannot compare %r and %r' % (self, other))
43
+
44
+ def __eq__(self, other):
45
+ self._check_compatible(other)
46
+ return self._parts == other._parts
47
+
48
+ def __ne__(self, other):
49
+ return not self.__eq__(other)
50
+
51
+ def __lt__(self, other):
52
+ self._check_compatible(other)
53
+ return self._parts < other._parts
54
+
55
+ def __gt__(self, other):
56
+ return not (self.__lt__(other) or self.__eq__(other))
57
+
58
+ def __le__(self, other):
59
+ return self.__lt__(other) or self.__eq__(other)
60
+
61
+ def __ge__(self, other):
62
+ return self.__gt__(other) or self.__eq__(other)
63
+
64
+ # See http://docs.python.org/reference/datamodel#object.__hash__
65
+ def __hash__(self):
66
+ return hash(self._parts)
67
+
68
+ def __repr__(self):
69
+ return "%s('%s')" % (self.__class__.__name__, self._string)
70
+
71
+ def __str__(self):
72
+ return self._string
73
+
74
+ @property
75
+ def is_prerelease(self):
76
+ raise NotImplementedError('Please implement in subclasses.')
77
+
78
+
79
+ class Matcher(object):
80
+ version_class = None
81
+
82
+ # value is either a callable or the name of a method
83
+ _operators = {
84
+ '<': lambda v, c, p: v < c,
85
+ '>': lambda v, c, p: v > c,
86
+ '<=': lambda v, c, p: v == c or v < c,
87
+ '>=': lambda v, c, p: v == c or v > c,
88
+ '==': lambda v, c, p: v == c,
89
+ '===': lambda v, c, p: v == c,
90
+ # by default, compatible => >=.
91
+ '~=': lambda v, c, p: v == c or v > c,
92
+ '!=': lambda v, c, p: v != c,
93
+ }
94
+
95
+ # this is a method only to support alternative implementations
96
+ # via overriding
97
+ def parse_requirement(self, s):
98
+ return parse_requirement(s)
99
+
100
+ def __init__(self, s):
101
+ if self.version_class is None:
102
+ raise ValueError('Please specify a version class')
103
+ self._string = s = s.strip()
104
+ r = self.parse_requirement(s)
105
+ if not r:
106
+ raise ValueError('Not valid: %r' % s)
107
+ self.name = r.name
108
+ self.key = self.name.lower() # for case-insensitive comparisons
109
+ clist = []
110
+ if r.constraints:
111
+ # import pdb; pdb.set_trace()
112
+ for op, s in r.constraints:
113
+ if s.endswith('.*'):
114
+ if op not in ('==', '!='):
115
+ raise ValueError('\'.*\' not allowed for '
116
+ '%r constraints' % op)
117
+ # Could be a partial version (e.g. for '2.*') which
118
+ # won't parse as a version, so keep it as a string
119
+ vn, prefix = s[:-2], True
120
+ # Just to check that vn is a valid version
121
+ self.version_class(vn)
122
+ else:
123
+ # Should parse as a version, so we can create an
124
+ # instance for the comparison
125
+ vn, prefix = self.version_class(s), False
126
+ clist.append((op, vn, prefix))
127
+ self._parts = tuple(clist)
128
+
129
+ def match(self, version):
130
+ """
131
+ Check if the provided version matches the constraints.
132
+
133
+ :param version: The version to match against this instance.
134
+ :type version: String or :class:`Version` instance.
135
+ """
136
+ if isinstance(version, string_types):
137
+ version = self.version_class(version)
138
+ for operator, constraint, prefix in self._parts:
139
+ f = self._operators.get(operator)
140
+ if isinstance(f, string_types):
141
+ f = getattr(self, f)
142
+ if not f:
143
+ msg = ('%r not implemented '
144
+ 'for %s' % (operator, self.__class__.__name__))
145
+ raise NotImplementedError(msg)
146
+ if not f(version, constraint, prefix):
147
+ return False
148
+ return True
149
+
150
+ @property
151
+ def exact_version(self):
152
+ result = None
153
+ if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
154
+ result = self._parts[0][1]
155
+ return result
156
+
157
+ def _check_compatible(self, other):
158
+ if type(self) != type(other) or self.name != other.name:
159
+ raise TypeError('cannot compare %s and %s' % (self, other))
160
+
161
+ def __eq__(self, other):
162
+ self._check_compatible(other)
163
+ return self.key == other.key and self._parts == other._parts
164
+
165
+ def __ne__(self, other):
166
+ return not self.__eq__(other)
167
+
168
+ # See http://docs.python.org/reference/datamodel#object.__hash__
169
+ def __hash__(self):
170
+ return hash(self.key) + hash(self._parts)
171
+
172
+ def __repr__(self):
173
+ return "%s(%r)" % (self.__class__.__name__, self._string)
174
+
175
+ def __str__(self):
176
+ return self._string
177
+
178
+
179
+ PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
180
+ r'(\.(post)(\d+))?(\.(dev)(\d+))?'
181
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
182
+
183
+
184
+ def _pep_440_key(s):
185
+ s = s.strip()
186
+ m = PEP440_VERSION_RE.match(s)
187
+ if not m:
188
+ raise UnsupportedVersionError('Not a valid version: %s' % s)
189
+ groups = m.groups()
190
+ nums = tuple(int(v) for v in groups[1].split('.'))
191
+ while len(nums) > 1 and nums[-1] == 0:
192
+ nums = nums[:-1]
193
+
194
+ if not groups[0]:
195
+ epoch = 0
196
+ else:
197
+ epoch = int(groups[0][:-1])
198
+ pre = groups[4:6]
199
+ post = groups[7:9]
200
+ dev = groups[10:12]
201
+ local = groups[13]
202
+ if pre == (None, None):
203
+ pre = ()
204
+ else:
205
+ pre = pre[0], int(pre[1])
206
+ if post == (None, None):
207
+ post = ()
208
+ else:
209
+ post = post[0], int(post[1])
210
+ if dev == (None, None):
211
+ dev = ()
212
+ else:
213
+ dev = dev[0], int(dev[1])
214
+ if local is None:
215
+ local = ()
216
+ else:
217
+ parts = []
218
+ for part in local.split('.'):
219
+ # to ensure that numeric compares as > lexicographic, avoid
220
+ # comparing them directly, but encode a tuple which ensures
221
+ # correct sorting
222
+ if part.isdigit():
223
+ part = (1, int(part))
224
+ else:
225
+ part = (0, part)
226
+ parts.append(part)
227
+ local = tuple(parts)
228
+ if not pre:
229
+ # either before pre-release, or final release and after
230
+ if not post and dev:
231
+ # before pre-release
232
+ pre = ('a', -1) # to sort before a0
233
+ else:
234
+ pre = ('z',) # to sort after all pre-releases
235
+ # now look at the state of post and dev.
236
+ if not post:
237
+ post = ('_',) # sort before 'a'
238
+ if not dev:
239
+ dev = ('final',)
240
+
241
+ #print('%s -> %s' % (s, m.groups()))
242
+ return epoch, nums, pre, post, dev, local
243
+
244
+
245
+ _normalized_key = _pep_440_key
246
+
247
+
248
+ class NormalizedVersion(Version):
249
+ """A rational version.
250
+
251
+ Good:
252
+ 1.2 # equivalent to "1.2.0"
253
+ 1.2.0
254
+ 1.2a1
255
+ 1.2.3a2
256
+ 1.2.3b1
257
+ 1.2.3c1
258
+ 1.2.3.4
259
+ TODO: fill this out
260
+
261
+ Bad:
262
+ 1 # minimum two numbers
263
+ 1.2a # release level must have a release serial
264
+ 1.2.3b
265
+ """
266
+ def parse(self, s):
267
+ result = _normalized_key(s)
268
+ # _normalized_key loses trailing zeroes in the release
269
+ # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
270
+ # However, PEP 440 prefix matching needs it: for example,
271
+ # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
272
+ m = PEP440_VERSION_RE.match(s) # must succeed
273
+ groups = m.groups()
274
+ self._release_clause = tuple(int(v) for v in groups[1].split('.'))
275
+ return result
276
+
277
+ PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
278
+
279
+ @property
280
+ def is_prerelease(self):
281
+ return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
282
+
283
+
284
+ def _match_prefix(x, y):
285
+ x = str(x)
286
+ y = str(y)
287
+ if x == y:
288
+ return True
289
+ if not x.startswith(y):
290
+ return False
291
+ n = len(y)
292
+ return x[n] == '.'
293
+
294
+
295
+ class NormalizedMatcher(Matcher):
296
+ version_class = NormalizedVersion
297
+
298
+ # value is either a callable or the name of a method
299
+ _operators = {
300
+ '~=': '_match_compatible',
301
+ '<': '_match_lt',
302
+ '>': '_match_gt',
303
+ '<=': '_match_le',
304
+ '>=': '_match_ge',
305
+ '==': '_match_eq',
306
+ '===': '_match_arbitrary',
307
+ '!=': '_match_ne',
308
+ }
309
+
310
+ def _adjust_local(self, version, constraint, prefix):
311
+ if prefix:
312
+ strip_local = '+' not in constraint and version._parts[-1]
313
+ else:
314
+ # both constraint and version are
315
+ # NormalizedVersion instances.
316
+ # If constraint does not have a local component,
317
+ # ensure the version doesn't, either.
318
+ strip_local = not constraint._parts[-1] and version._parts[-1]
319
+ if strip_local:
320
+ s = version._string.split('+', 1)[0]
321
+ version = self.version_class(s)
322
+ return version, constraint
323
+
324
+ def _match_lt(self, version, constraint, prefix):
325
+ version, constraint = self._adjust_local(version, constraint, prefix)
326
+ if version >= constraint:
327
+ return False
328
+ release_clause = constraint._release_clause
329
+ pfx = '.'.join([str(i) for i in release_clause])
330
+ return not _match_prefix(version, pfx)
331
+
332
+ def _match_gt(self, version, constraint, prefix):
333
+ version, constraint = self._adjust_local(version, constraint, prefix)
334
+ if version <= constraint:
335
+ return False
336
+ release_clause = constraint._release_clause
337
+ pfx = '.'.join([str(i) for i in release_clause])
338
+ return not _match_prefix(version, pfx)
339
+
340
+ def _match_le(self, version, constraint, prefix):
341
+ version, constraint = self._adjust_local(version, constraint, prefix)
342
+ return version <= constraint
343
+
344
+ def _match_ge(self, version, constraint, prefix):
345
+ version, constraint = self._adjust_local(version, constraint, prefix)
346
+ return version >= constraint
347
+
348
+ def _match_eq(self, version, constraint, prefix):
349
+ version, constraint = self._adjust_local(version, constraint, prefix)
350
+ if not prefix:
351
+ result = (version == constraint)
352
+ else:
353
+ result = _match_prefix(version, constraint)
354
+ return result
355
+
356
+ def _match_arbitrary(self, version, constraint, prefix):
357
+ return str(version) == str(constraint)
358
+
359
+ def _match_ne(self, version, constraint, prefix):
360
+ version, constraint = self._adjust_local(version, constraint, prefix)
361
+ if not prefix:
362
+ result = (version != constraint)
363
+ else:
364
+ result = not _match_prefix(version, constraint)
365
+ return result
366
+
367
+ def _match_compatible(self, version, constraint, prefix):
368
+ version, constraint = self._adjust_local(version, constraint, prefix)
369
+ if version == constraint:
370
+ return True
371
+ if version < constraint:
372
+ return False
373
+ # if not prefix:
374
+ # return True
375
+ release_clause = constraint._release_clause
376
+ if len(release_clause) > 1:
377
+ release_clause = release_clause[:-1]
378
+ pfx = '.'.join([str(i) for i in release_clause])
379
+ return _match_prefix(version, pfx)
380
+
381
+ _REPLACEMENTS = (
382
+ (re.compile('[.+-]$'), ''), # remove trailing puncts
383
+ (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
384
+ (re.compile('^[.-]'), ''), # remove leading puncts
385
+ (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
386
+ (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
387
+ (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
388
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
389
+ (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
390
+ (re.compile(r'\b(pre-alpha|prealpha)\b'),
391
+ 'pre.alpha'), # standardise
392
+ (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
393
+ )
394
+
395
+ _SUFFIX_REPLACEMENTS = (
396
+ (re.compile('^[:~._+-]+'), ''), # remove leading puncts
397
+ (re.compile('[,*")([\\]]'), ''), # remove unwanted chars
398
+ (re.compile('[~:+_ -]'), '.'), # replace illegal chars
399
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
400
+ (re.compile(r'\.$'), ''), # trailing '.'
401
+ )
402
+
403
+ _NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
404
+
405
+
406
+ def _suggest_semantic_version(s):
407
+ """
408
+ Try to suggest a semantic form for a version for which
409
+ _suggest_normalized_version couldn't come up with anything.
410
+ """
411
+ result = s.strip().lower()
412
+ for pat, repl in _REPLACEMENTS:
413
+ result = pat.sub(repl, result)
414
+ if not result:
415
+ result = '0.0.0'
416
+
417
+ # Now look for numeric prefix, and separate it out from
418
+ # the rest.
419
+ #import pdb; pdb.set_trace()
420
+ m = _NUMERIC_PREFIX.match(result)
421
+ if not m:
422
+ prefix = '0.0.0'
423
+ suffix = result
424
+ else:
425
+ prefix = m.groups()[0].split('.')
426
+ prefix = [int(i) for i in prefix]
427
+ while len(prefix) < 3:
428
+ prefix.append(0)
429
+ if len(prefix) == 3:
430
+ suffix = result[m.end():]
431
+ else:
432
+ suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
433
+ prefix = prefix[:3]
434
+ prefix = '.'.join([str(i) for i in prefix])
435
+ suffix = suffix.strip()
436
+ if suffix:
437
+ #import pdb; pdb.set_trace()
438
+ # massage the suffix.
439
+ for pat, repl in _SUFFIX_REPLACEMENTS:
440
+ suffix = pat.sub(repl, suffix)
441
+
442
+ if not suffix:
443
+ result = prefix
444
+ else:
445
+ sep = '-' if 'dev' in suffix else '+'
446
+ result = prefix + sep + suffix
447
+ if not is_semver(result):
448
+ result = None
449
+ return result
450
+
451
+
452
+ def _suggest_normalized_version(s):
453
+ """Suggest a normalized version close to the given version string.
454
+
455
+ If you have a version string that isn't rational (i.e. NormalizedVersion
456
+ doesn't like it) then you might be able to get an equivalent (or close)
457
+ rational version from this function.
458
+
459
+ This does a number of simple normalizations to the given string, based
460
+ on observation of versions currently in use on PyPI. Given a dump of
461
+ those version during PyCon 2009, 4287 of them:
462
+ - 2312 (53.93%) match NormalizedVersion without change
463
+ with the automatic suggestion
464
+ - 3474 (81.04%) match when using this suggestion method
465
+
466
+ @param s {str} An irrational version string.
467
+ @returns A rational version string, or None if one couldn't be determined.
468
+ """
469
+ try:
470
+ _normalized_key(s)
471
+ return s # already rational
472
+ except UnsupportedVersionError:
473
+ pass
474
+
475
+ rs = s.lower()
476
+
477
+ # part of this could use maketrans
478
+ for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
479
+ ('beta', 'b'), ('rc', 'c'), ('-final', ''),
480
+ ('-pre', 'c'),
481
+ ('-release', ''), ('.release', ''), ('-stable', ''),
482
+ ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
483
+ ('final', '')):
484
+ rs = rs.replace(orig, repl)
485
+
486
+ # if something ends with dev or pre, we add a 0
487
+ rs = re.sub(r"pre$", r"pre0", rs)
488
+ rs = re.sub(r"dev$", r"dev0", rs)
489
+
490
+ # if we have something like "b-2" or "a.2" at the end of the
491
+ # version, that is probably beta, alpha, etc
492
+ # let's remove the dash or dot
493
+ rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
494
+
495
+ # 1.0-dev-r371 -> 1.0.dev371
496
+ # 0.1-dev-r79 -> 0.1.dev79
497
+ rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
498
+
499
+ # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
500
+ rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
501
+
502
+ # Clean: v0.3, v1.0
503
+ if rs.startswith('v'):
504
+ rs = rs[1:]
505
+
506
+ # Clean leading '0's on numbers.
507
+ #TODO: unintended side-effect on, e.g., "2003.05.09"
508
+ # PyPI stats: 77 (~2%) better
509
+ rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
510
+
511
+ # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
512
+ # zero.
513
+ # PyPI stats: 245 (7.56%) better
514
+ rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
515
+
516
+ # the 'dev-rNNN' tag is a dev tag
517
+ rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
518
+
519
+ # clean the - when used as a pre delimiter
520
+ rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
521
+
522
+ # a terminal "dev" or "devel" can be changed into ".dev0"
523
+ rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
524
+
525
+ # a terminal "dev" can be changed into ".dev0"
526
+ rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
527
+
528
+ # a terminal "final" or "stable" can be removed
529
+ rs = re.sub(r"(final|stable)$", "", rs)
530
+
531
+ # The 'r' and the '-' tags are post release tags
532
+ # 0.4a1.r10 -> 0.4a1.post10
533
+ # 0.9.33-17222 -> 0.9.33.post17222
534
+ # 0.9.33-r17222 -> 0.9.33.post17222
535
+ rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
536
+
537
+ # Clean 'r' instead of 'dev' usage:
538
+ # 0.9.33+r17222 -> 0.9.33.dev17222
539
+ # 1.0dev123 -> 1.0.dev123
540
+ # 1.0.git123 -> 1.0.dev123
541
+ # 1.0.bzr123 -> 1.0.dev123
542
+ # 0.1a0dev.123 -> 0.1a0.dev123
543
+ # PyPI stats: ~150 (~4%) better
544
+ rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
545
+
546
+ # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
547
+ # 0.2.pre1 -> 0.2c1
548
+ # 0.2-c1 -> 0.2c1
549
+ # 1.0preview123 -> 1.0c123
550
+ # PyPI stats: ~21 (0.62%) better
551
+ rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
552
+
553
+ # Tcl/Tk uses "px" for their post release markers
554
+ rs = re.sub(r"p(\d+)$", r".post\1", rs)
555
+
556
+ try:
557
+ _normalized_key(rs)
558
+ except UnsupportedVersionError:
559
+ rs = None
560
+ return rs
561
+
562
+ #
563
+ # Legacy version processing (distribute-compatible)
564
+ #
565
+
566
+ _VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
567
+ _VERSION_REPLACE = {
568
+ 'pre': 'c',
569
+ 'preview': 'c',
570
+ '-': 'final-',
571
+ 'rc': 'c',
572
+ 'dev': '@',
573
+ '': None,
574
+ '.': None,
575
+ }
576
+
577
+
578
+ def _legacy_key(s):
579
+ def get_parts(s):
580
+ result = []
581
+ for p in _VERSION_PART.split(s.lower()):
582
+ p = _VERSION_REPLACE.get(p, p)
583
+ if p:
584
+ if '0' <= p[:1] <= '9':
585
+ p = p.zfill(8)
586
+ else:
587
+ p = '*' + p
588
+ result.append(p)
589
+ result.append('*final')
590
+ return result
591
+
592
+ result = []
593
+ for p in get_parts(s):
594
+ if p.startswith('*'):
595
+ if p < '*final':
596
+ while result and result[-1] == '*final-':
597
+ result.pop()
598
+ while result and result[-1] == '00000000':
599
+ result.pop()
600
+ result.append(p)
601
+ return tuple(result)
602
+
603
+
604
+ class LegacyVersion(Version):
605
+ def parse(self, s):
606
+ return _legacy_key(s)
607
+
608
+ @property
609
+ def is_prerelease(self):
610
+ result = False
611
+ for x in self._parts:
612
+ if (isinstance(x, string_types) and x.startswith('*') and
613
+ x < '*final'):
614
+ result = True
615
+ break
616
+ return result
617
+
618
+
619
+ class LegacyMatcher(Matcher):
620
+ version_class = LegacyVersion
621
+
622
+ _operators = dict(Matcher._operators)
623
+ _operators['~='] = '_match_compatible'
624
+
625
+ numeric_re = re.compile(r'^(\d+(\.\d+)*)')
626
+
627
+ def _match_compatible(self, version, constraint, prefix):
628
+ if version < constraint:
629
+ return False
630
+ m = self.numeric_re.match(str(constraint))
631
+ if not m:
632
+ logger.warning('Cannot compute compatible match for version %s '
633
+ ' and constraint %s', version, constraint)
634
+ return True
635
+ s = m.groups()[0]
636
+ if '.' in s:
637
+ s = s.rsplit('.', 1)[0]
638
+ return _match_prefix(version, s)
639
+
640
+ #
641
+ # Semantic versioning
642
+ #
643
+
644
+ _SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
645
+ r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
646
+ r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
647
+
648
+
649
+ def is_semver(s):
650
+ return _SEMVER_RE.match(s)
651
+
652
+
653
+ def _semantic_key(s):
654
+ def make_tuple(s, absent):
655
+ if s is None:
656
+ result = (absent,)
657
+ else:
658
+ parts = s[1:].split('.')
659
+ # We can't compare ints and strings on Python 3, so fudge it
660
+ # by zero-filling numeric values to simulate a numeric comparison
661
+ result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
662
+ return result
663
+
664
+ m = is_semver(s)
665
+ if not m:
666
+ raise UnsupportedVersionError(s)
667
+ groups = m.groups()
668
+ major, minor, patch = [int(i) for i in groups[:3]]
669
+ # choose the '|' and '*' so that versions sort correctly
670
+ pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
671
+ return (major, minor, patch), pre, build
672
+
673
+
674
+ class SemanticVersion(Version):
675
+ def parse(self, s):
676
+ return _semantic_key(s)
677
+
678
+ @property
679
+ def is_prerelease(self):
680
+ return self._parts[1][0] != '|'
681
+
682
+
683
+ class SemanticMatcher(Matcher):
684
+ version_class = SemanticVersion
685
+
686
+
687
+ class VersionScheme(object):
688
+ def __init__(self, key, matcher, suggester=None):
689
+ self.key = key
690
+ self.matcher = matcher
691
+ self.suggester = suggester
692
+
693
+ def is_valid_version(self, s):
694
+ try:
695
+ self.matcher.version_class(s)
696
+ result = True
697
+ except UnsupportedVersionError:
698
+ result = False
699
+ return result
700
+
701
+ def is_valid_matcher(self, s):
702
+ try:
703
+ self.matcher(s)
704
+ result = True
705
+ except UnsupportedVersionError:
706
+ result = False
707
+ return result
708
+
709
+ def is_valid_constraint_list(self, s):
710
+ """
711
+ Used for processing some metadata fields
712
+ """
713
+ # See issue #140. Be tolerant of a single trailing comma.
714
+ if s.endswith(','):
715
+ s = s[:-1]
716
+ return self.is_valid_matcher('dummy_name (%s)' % s)
717
+
718
+ def suggest(self, s):
719
+ if self.suggester is None:
720
+ result = None
721
+ else:
722
+ result = self.suggester(s)
723
+ return result
724
+
725
+ _SCHEMES = {
726
+ 'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
727
+ _suggest_normalized_version),
728
+ 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
729
+ 'semantic': VersionScheme(_semantic_key, SemanticMatcher,
730
+ _suggest_semantic_version),
731
+ }
732
+
733
+ _SCHEMES['default'] = _SCHEMES['normalized']
734
+
735
+
736
+ def get_scheme(name):
737
+ if name not in _SCHEMES:
738
+ raise ValueError('unknown scheme name: %r' % name)
739
+ return _SCHEMES[name]
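
To make the scheme registry above concrete, a minimal usage sketch follows; it assumes nothing beyond what this file defines, plus pip's vendored import path:

    from pip._vendor.distlib.version import get_scheme

    scheme = get_scheme('semantic')
    scheme.is_valid_version('1.0.0-alpha')   # True: major.minor.patch plus a pre-release
    scheme.is_valid_version('1.0')           # False: _SEMVER_RE requires three components
    m = scheme.matcher('dummy_name (>= 1.0.0)')
    m.match('1.2.3')                         # True

The 'name (constraints)' form matches what is_valid_constraint_list() builds internally.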
venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py ADDED
@@ -0,0 +1,1053 @@
+ # -*- coding: utf-8 -*-
+ #
+ # Copyright (C) 2013-2020 Vinay Sajip.
+ # Licensed to the Python Software Foundation under a contributor agreement.
+ # See LICENSE.txt and CONTRIBUTORS.txt.
+ #
+ from __future__ import unicode_literals
+ 
+ import base64
+ import codecs
+ import datetime
+ from email import message_from_file
+ import hashlib
+ import imp
+ import json
+ import logging
+ import os
+ import posixpath
+ import re
+ import shutil
+ import sys
+ import tempfile
+ import zipfile
+ 
+ from . import __version__, DistlibException
+ from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
+ from .database import InstalledDistribution
+ from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+                        LEGACY_METADATA_FILENAME)
+ from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
+                    cached_property, get_cache_base, read_exports, tempdir,
+                    get_platform)
+ from .version import NormalizedVersion, UnsupportedVersionError
+ 
+ logger = logging.getLogger(__name__)
+ 
+ cache = None    # created when needed
+ 
+ if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
+     IMP_PREFIX = 'pp'
+ elif sys.platform.startswith('java'):  # pragma: no cover
+     IMP_PREFIX = 'jy'
+ elif sys.platform == 'cli':  # pragma: no cover
+     IMP_PREFIX = 'ip'
+ else:
+     IMP_PREFIX = 'cp'
+ 
+ VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
+ if not VER_SUFFIX:   # pragma: no cover
+     VER_SUFFIX = '%s%s' % sys.version_info[:2]
+ PYVER = 'py' + VER_SUFFIX
+ IMPVER = IMP_PREFIX + VER_SUFFIX
+ 
+ ARCH = get_platform().replace('-', '_').replace('.', '_')
+ 
+ ABI = sysconfig.get_config_var('SOABI')
+ if ABI and ABI.startswith('cpython-'):
+     ABI = ABI.replace('cpython-', 'cp').split('-')[0]
+ else:
+     def _derive_abi():
+         parts = ['cp', VER_SUFFIX]
+         if sysconfig.get_config_var('Py_DEBUG'):
+             parts.append('d')
+         if sysconfig.get_config_var('WITH_PYMALLOC'):
+             parts.append('m')
+         if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
+             parts.append('u')
+         return ''.join(parts)
+     ABI = _derive_abi()
+     del _derive_abi
+ 
+ FILENAME_RE = re.compile(r'''
+ (?P<nm>[^-]+)
+ -(?P<vn>\d+[^-]*)
+ (-(?P<bn>\d+[^-]*))?
+ -(?P<py>\w+\d+(\.\w+\d+)*)
+ -(?P<bi>\w+)
+ -(?P<ar>\w+(\.\w+)*)
+ \.whl$
+ ''', re.IGNORECASE | re.VERBOSE)
+ 
+ NAME_VERSION_RE = re.compile(r'''
+ (?P<nm>[^-]+)
+ -(?P<vn>\d+[^-]*)
+ (-(?P<bn>\d+[^-]*))?$
+ ''', re.IGNORECASE | re.VERBOSE)
+ 
+ SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
+ SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
+ SHEBANG_PYTHON = b'#!python'
+ SHEBANG_PYTHONW = b'#!pythonw'
+ 
+ if os.sep == '/':
+     to_posix = lambda o: o
+ else:
+     to_posix = lambda o: o.replace(os.sep, '/')
+ 
+ 
+ class Mounter(object):
+     def __init__(self):
+         self.impure_wheels = {}
+         self.libs = {}
+ 
+     def add(self, pathname, extensions):
+         self.impure_wheels[pathname] = extensions
+         self.libs.update(extensions)
+ 
+     def remove(self, pathname):
+         extensions = self.impure_wheels.pop(pathname)
+         for k, v in extensions:
+             if k in self.libs:
+                 del self.libs[k]
+ 
+     def find_module(self, fullname, path=None):
+         if fullname in self.libs:
+             result = self
+         else:
+             result = None
+         return result
+ 
+     def load_module(self, fullname):
+         if fullname in sys.modules:
+             result = sys.modules[fullname]
+         else:
+             if fullname not in self.libs:
+                 raise ImportError('unable to find extension for %s' % fullname)
+             result = imp.load_dynamic(fullname, self.libs[fullname])
+             result.__loader__ = self
+             parts = fullname.rsplit('.', 1)
+             if len(parts) > 1:
+                 result.__package__ = parts[0]
+         return result
+ 
+ _hook = Mounter()
+ 
+ 
+ class Wheel(object):
+     """
+     Class to build and install from Wheel files (PEP 427).
+     """
+ 
+     wheel_version = (1, 1)
+     hash_kind = 'sha256'
+ 
+     def __init__(self, filename=None, sign=False, verify=False):
+         """
+         Initialise an instance using a (valid) filename.
+         """
+         self.sign = sign
+         self.should_verify = verify
+         self.buildver = ''
+         self.pyver = [PYVER]
+         self.abi = ['none']
+         self.arch = ['any']
+         self.dirname = os.getcwd()
+         if filename is None:
+             self.name = 'dummy'
+             self.version = '0.1'
+             self._filename = self.filename
+         else:
+             m = NAME_VERSION_RE.match(filename)
+             if m:
+                 info = m.groupdict('')
+                 self.name = info['nm']
+                 # Reinstate the local version separator
+                 self.version = info['vn'].replace('_', '-')
+                 self.buildver = info['bn']
+                 self._filename = self.filename
+             else:
+                 dirname, filename = os.path.split(filename)
+                 m = FILENAME_RE.match(filename)
+                 if not m:
+                     raise DistlibException('Invalid name or '
+                                            'filename: %r' % filename)
+                 if dirname:
+                     self.dirname = os.path.abspath(dirname)
+                 self._filename = filename
+                 info = m.groupdict('')
+                 self.name = info['nm']
+                 self.version = info['vn']
+                 self.buildver = info['bn']
+                 self.pyver = info['py'].split('.')
+                 self.abi = info['bi'].split('.')
+                 self.arch = info['ar'].split('.')
+ 
+     @property
+     def filename(self):
+         """
+         Build and return a filename from the various components.
+         """
+         if self.buildver:
+             buildver = '-' + self.buildver
+         else:
+             buildver = ''
+         pyver = '.'.join(self.pyver)
+         abi = '.'.join(self.abi)
+         arch = '.'.join(self.arch)
+         # replace - with _ as a local version separator
+         version = self.version.replace('-', '_')
+         return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
+                                          pyver, abi, arch)
+ 
+     @property
+     def exists(self):
+         path = os.path.join(self.dirname, self.filename)
+         return os.path.isfile(path)
+ 
+     @property
+     def tags(self):
+         for pyver in self.pyver:
+             for abi in self.abi:
+                 for arch in self.arch:
+                     yield pyver, abi, arch
+ 
+     @cached_property
+     def metadata(self):
+         pathname = os.path.join(self.dirname, self.filename)
+         name_ver = '%s-%s' % (self.name, self.version)
+         info_dir = '%s.dist-info' % name_ver
+         wrapper = codecs.getreader('utf-8')
+         with ZipFile(pathname, 'r') as zf:
+             wheel_metadata = self.get_wheel_metadata(zf)
+             wv = wheel_metadata['Wheel-Version'].split('.', 1)
+             file_version = tuple([int(i) for i in wv])
+             # if file_version < (1, 1):
+             #     fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
+             #            LEGACY_METADATA_FILENAME]
+             # else:
+             #     fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+             fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
+             result = None
+             for fn in fns:
+                 try:
+                     metadata_filename = posixpath.join(info_dir, fn)
+                     with zf.open(metadata_filename) as bf:
+                         wf = wrapper(bf)
+                         result = Metadata(fileobj=wf)
+                         if result:
+                             break
+                 except KeyError:
+                     pass
+             if not result:
+                 raise ValueError('Invalid wheel, because metadata is '
+                                  'missing: looked in %s' % ', '.join(fns))
+         return result
+ 
+     def get_wheel_metadata(self, zf):
+         name_ver = '%s-%s' % (self.name, self.version)
+         info_dir = '%s.dist-info' % name_ver
+         metadata_filename = posixpath.join(info_dir, 'WHEEL')
+         with zf.open(metadata_filename) as bf:
+             wf = codecs.getreader('utf-8')(bf)
+             message = message_from_file(wf)
+         return dict(message)
+ 
+     @cached_property
+     def info(self):
+         pathname = os.path.join(self.dirname, self.filename)
+         with ZipFile(pathname, 'r') as zf:
+             result = self.get_wheel_metadata(zf)
+         return result
+ 
+     def process_shebang(self, data):
+         m = SHEBANG_RE.match(data)
+         if m:
+             end = m.end()
+             shebang, data_after_shebang = data[:end], data[end:]
+             # Preserve any arguments after the interpreter
+             if b'pythonw' in shebang.lower():
+                 shebang_python = SHEBANG_PYTHONW
+             else:
+                 shebang_python = SHEBANG_PYTHON
+             m = SHEBANG_DETAIL_RE.match(shebang)
+             if m:
+                 args = b' ' + m.groups()[-1]
+             else:
+                 args = b''
+             shebang = shebang_python + args
+             data = shebang + data_after_shebang
+         else:
+             cr = data.find(b'\r')
+             lf = data.find(b'\n')
+             if cr < 0 or cr > lf:
+                 term = b'\n'
+             else:
+                 if data[cr:cr + 2] == b'\r\n':
+                     term = b'\r\n'
+                 else:
+                     term = b'\r'
+             data = SHEBANG_PYTHON + term + data
+         return data
+ 
+     def get_hash(self, data, hash_kind=None):
+         if hash_kind is None:
+             hash_kind = self.hash_kind
+         try:
+             hasher = getattr(hashlib, hash_kind)
+         except AttributeError:
+             raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
+         result = hasher(data).digest()
+         result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
+         return hash_kind, result
+ 
+     def write_record(self, records, record_path, base):
+         records = list(records)  # make a copy, as mutated
+         p = to_posix(os.path.relpath(record_path, base))
+         records.append((p, '', ''))
+         with CSVWriter(record_path) as writer:
+             for row in records:
+                 writer.writerow(row)
+ 
+     def write_records(self, info, libdir, archive_paths):
+         records = []
+         distinfo, info_dir = info
+         hasher = getattr(hashlib, self.hash_kind)
+         for ap, p in archive_paths:
+             with open(p, 'rb') as f:
+                 data = f.read()
+             digest = '%s=%s' % self.get_hash(data)
+             size = os.path.getsize(p)
+             records.append((ap, digest, size))
+ 
+         p = os.path.join(distinfo, 'RECORD')
+         self.write_record(records, p, libdir)
+         ap = to_posix(os.path.join(info_dir, 'RECORD'))
+         archive_paths.append((ap, p))
+ 
+     def build_zip(self, pathname, archive_paths):
+         with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
+             for ap, p in archive_paths:
+                 logger.debug('Wrote %s to %s in wheel', p, ap)
+                 zf.write(p, ap)
+ 
+     def build(self, paths, tags=None, wheel_version=None):
+         """
+         Build a wheel from files in specified paths, and use any specified tags
+         when determining the name of the wheel.
+         """
+         if tags is None:
+             tags = {}
+ 
+         libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
+         if libkey == 'platlib':
+             is_pure = 'false'
+             default_pyver = [IMPVER]
+             default_abi = [ABI]
+             default_arch = [ARCH]
+         else:
+             is_pure = 'true'
+             default_pyver = [PYVER]
+             default_abi = ['none']
+             default_arch = ['any']
+ 
+         self.pyver = tags.get('pyver', default_pyver)
+         self.abi = tags.get('abi', default_abi)
+         self.arch = tags.get('arch', default_arch)
+ 
+         libdir = paths[libkey]
+ 
+         name_ver = '%s-%s' % (self.name, self.version)
+         data_dir = '%s.data' % name_ver
+         info_dir = '%s.dist-info' % name_ver
+ 
+         archive_paths = []
+ 
+         # First, stuff which is not in site-packages
+         for key in ('data', 'headers', 'scripts'):
+             if key not in paths:
+                 continue
+             path = paths[key]
+             if os.path.isdir(path):
+                 for root, dirs, files in os.walk(path):
+                     for fn in files:
+                         p = fsdecode(os.path.join(root, fn))
+                         rp = os.path.relpath(p, path)
+                         ap = to_posix(os.path.join(data_dir, key, rp))
+                         archive_paths.append((ap, p))
+                         if key == 'scripts' and not p.endswith('.exe'):
+                             with open(p, 'rb') as f:
+                                 data = f.read()
+                             data = self.process_shebang(data)
+                             with open(p, 'wb') as f:
+                                 f.write(data)
+ 
+         # Now, stuff which is in site-packages, other than the
+         # distinfo stuff.
+         path = libdir
+         distinfo = None
+         for root, dirs, files in os.walk(path):
+             if root == path:
+                 # At the top level only, save distinfo for later
+                 # and skip it for now
+                 for i, dn in enumerate(dirs):
+                     dn = fsdecode(dn)
+                     if dn.endswith('.dist-info'):
+                         distinfo = os.path.join(root, dn)
+                         del dirs[i]
+                         break
+                 assert distinfo, '.dist-info directory expected, not found'
+ 
+             for fn in files:
+                 # comment out next suite to leave .pyc files in
+                 if fsdecode(fn).endswith(('.pyc', '.pyo')):
+                     continue
+                 p = os.path.join(root, fn)
+                 rp = to_posix(os.path.relpath(p, path))
+                 archive_paths.append((rp, p))
+ 
+         # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
+         files = os.listdir(distinfo)
+         for fn in files:
+             if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
+                 p = fsdecode(os.path.join(distinfo, fn))
+                 ap = to_posix(os.path.join(info_dir, fn))
+                 archive_paths.append((ap, p))
+ 
+         wheel_metadata = [
+             'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
+             'Generator: distlib %s' % __version__,
+             'Root-Is-Purelib: %s' % is_pure,
+         ]
+         for pyver, abi, arch in self.tags:
+             wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
+         p = os.path.join(distinfo, 'WHEEL')
+         with open(p, 'w') as f:
+             f.write('\n'.join(wheel_metadata))
+         ap = to_posix(os.path.join(info_dir, 'WHEEL'))
+         archive_paths.append((ap, p))
+ 
+         # Sort the entries by archive path. Not needed by any spec, but it
+         # keeps the archive listing and RECORD tidier than they would
+         # otherwise be. Use the number of path segments to keep directory
+         # entries together, and keep the dist-info stuff at the end.
+         def sorter(t):
+             ap = t[0]
+             n = ap.count('/')
+             if '.dist-info' in ap:
+                 n += 10000
+             return (n, ap)
+         archive_paths = sorted(archive_paths, key=sorter)
+ 
+         # Now, at last, RECORD.
+         # Paths in here are archive paths - nothing else makes sense.
+         self.write_records((distinfo, info_dir), libdir, archive_paths)
+         # Now, ready to build the zip file
+         pathname = os.path.join(self.dirname, self.filename)
+         self.build_zip(pathname, archive_paths)
+         return pathname
+ 
+     def skip_entry(self, arcname):
+         """
+         Determine whether an archive entry should be skipped when verifying
+         or installing.
+         """
+         # The signature file won't be in RECORD,
+         # and we don't currently do anything with it.
+         # We also skip directories, as they won't be in RECORD
+         # either. See:
+         #
+         # https://github.com/pypa/wheel/issues/294
+         # https://github.com/pypa/wheel/issues/287
+         # https://github.com/pypa/wheel/pull/289
+         #
+         return arcname.endswith(('/', '/RECORD.jws'))
+ 
+     def install(self, paths, maker, **kwargs):
+         """
+         Install a wheel to the specified paths. If kwarg ``warner`` is
+         specified, it should be a callable, which will be called with two
+         tuples indicating the wheel version of this software and the wheel
+         version in the file, if there is a discrepancy in the versions.
+         This can be used to issue any warnings or raise any exceptions.
+         If kwarg ``lib_only`` is True, only the purelib/platlib files are
+         installed, and the headers, scripts, data and dist-info metadata are
+         not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
+         bytecode will try to use file-hash based invalidation (PEP-552) on
+         supported interpreter versions (CPython 3.7+).
+ 
+         The return value is a :class:`InstalledDistribution` instance unless
+         ``lib_only`` is True, in which case the return value is ``None``.
+         """
+ 
+         dry_run = maker.dry_run
+         warner = kwargs.get('warner')
+         lib_only = kwargs.get('lib_only', False)
+         bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
+ 
+         pathname = os.path.join(self.dirname, self.filename)
+         name_ver = '%s-%s' % (self.name, self.version)
+         data_dir = '%s.data' % name_ver
+         info_dir = '%s.dist-info' % name_ver
+ 
+         metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
+         wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+         record_name = posixpath.join(info_dir, 'RECORD')
+ 
+         wrapper = codecs.getreader('utf-8')
+ 
+         with ZipFile(pathname, 'r') as zf:
+             with zf.open(wheel_metadata_name) as bwf:
+                 wf = wrapper(bwf)
+                 message = message_from_file(wf)
+             wv = message['Wheel-Version'].split('.', 1)
+             file_version = tuple([int(i) for i in wv])
+             if (file_version != self.wheel_version) and warner:
+                 warner(self.wheel_version, file_version)
+ 
+             if message['Root-Is-Purelib'] == 'true':
+                 libdir = paths['purelib']
+             else:
+                 libdir = paths['platlib']
+ 
+             records = {}
+             with zf.open(record_name) as bf:
+                 with CSVReader(stream=bf) as reader:
+                     for row in reader:
+                         p = row[0]
+                         records[p] = row
+ 
+             data_pfx = posixpath.join(data_dir, '')
+             info_pfx = posixpath.join(info_dir, '')
+             script_pfx = posixpath.join(data_dir, 'scripts', '')
+ 
+             # make a new instance rather than a copy of maker's,
+             # as we mutate it
+             fileop = FileOperator(dry_run=dry_run)
+             fileop.record = True    # so we can rollback if needed
+ 
+             bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
+ 
+             outfiles = []   # for RECORD writing
+ 
+             # for script copying/shebang processing
+             workdir = tempfile.mkdtemp()
+             # set target dir later
+             # we default add_launchers to False, as the
+             # Python Launcher should be used instead
+             maker.source_dir = workdir
+             maker.target_dir = None
+             try:
+                 for zinfo in zf.infolist():
+                     arcname = zinfo.filename
+                     if isinstance(arcname, text_type):
+                         u_arcname = arcname
+                     else:
+                         u_arcname = arcname.decode('utf-8')
+                     if self.skip_entry(u_arcname):
+                         continue
+                     row = records[u_arcname]
+                     if row[2] and str(zinfo.file_size) != row[2]:
+                         raise DistlibException('size mismatch for '
+                                                '%s' % u_arcname)
+                     if row[1]:
+                         kind, value = row[1].split('=', 1)
+                         with zf.open(arcname) as bf:
+                             data = bf.read()
+                         _, digest = self.get_hash(data, kind)
+                         if digest != value:
+                             raise DistlibException('digest mismatch for '
+                                                    '%s' % arcname)
+ 
+                     if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
+                         logger.debug('lib_only: skipping %s', u_arcname)
+                         continue
+                     is_script = (u_arcname.startswith(script_pfx)
+                                  and not u_arcname.endswith('.exe'))
+ 
+                     if u_arcname.startswith(data_pfx):
+                         _, where, rp = u_arcname.split('/', 2)
+                         outfile = os.path.join(paths[where], convert_path(rp))
+                     else:
+                         # meant for site-packages.
+                         if u_arcname in (wheel_metadata_name, record_name):
+                             continue
+                         outfile = os.path.join(libdir, convert_path(u_arcname))
+                     if not is_script:
+                         with zf.open(arcname) as bf:
+                             fileop.copy_stream(bf, outfile)
+                         # Issue #147: permission bits aren't preserved. Using
+                         # zf.extract(zinfo, libdir) should have worked, but didn't,
+                         # see https://www.thetopsites.net/article/53834422.shtml
+                         # So ... manually preserve permission bits as given in zinfo
+                         if os.name == 'posix':
+                             # just set the normal permission bits
+                             os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)
+                         outfiles.append(outfile)
+                         # Double check the digest of the written file
+                         if not dry_run and row[1]:
+                             with open(outfile, 'rb') as bf:
+                                 data = bf.read()
+                                 _, newdigest = self.get_hash(data, kind)
+                                 if newdigest != digest:
+                                     raise DistlibException('digest mismatch '
+                                                            'on write for '
+                                                            '%s' % outfile)
+                         if bc and outfile.endswith('.py'):
+                             try:
+                                 pyc = fileop.byte_compile(outfile,
+                                                           hashed_invalidation=bc_hashed_invalidation)
+                                 outfiles.append(pyc)
+                             except Exception:
+                                 # Don't give up if byte-compilation fails,
+                                 # but log it and perhaps warn the user
+                                 logger.warning('Byte-compilation failed',
+                                                exc_info=True)
+                     else:
+                         fn = os.path.basename(convert_path(arcname))
+                         workname = os.path.join(workdir, fn)
+                         with zf.open(arcname) as bf:
+                             fileop.copy_stream(bf, workname)
+ 
+                         dn, fn = os.path.split(outfile)
+                         maker.target_dir = dn
+                         filenames = maker.make(fn)
+                         fileop.set_executable_mode(filenames)
+                         outfiles.extend(filenames)
+ 
+                 if lib_only:
+                     logger.debug('lib_only: returning None')
+                     dist = None
+                 else:
+                     # Generate scripts
+ 
+                     # Try to get pydist.json so we can see if there are
+                     # any commands to generate. If this fails (e.g. because
+                     # of a legacy wheel), log a warning but don't give up.
+                     commands = None
+                     file_version = self.info['Wheel-Version']
+                     if file_version == '1.0':
+                         # Use legacy info
+                         ep = posixpath.join(info_dir, 'entry_points.txt')
+                         try:
+                             with zf.open(ep) as bwf:
+                                 epdata = read_exports(bwf)
+                             commands = {}
+                             for key in ('console', 'gui'):
+                                 k = '%s_scripts' % key
+                                 if k in epdata:
+                                     commands['wrap_%s' % key] = d = {}
+                                     for v in epdata[k].values():
+                                         s = '%s:%s' % (v.prefix, v.suffix)
+                                         if v.flags:
+                                             s += ' [%s]' % ','.join(v.flags)
+                                         d[v.name] = s
+                         except Exception:
+                             logger.warning('Unable to read legacy script '
+                                            'metadata, so cannot generate '
+                                            'scripts')
+                     else:
+                         try:
+                             with zf.open(metadata_name) as bwf:
+                                 wf = wrapper(bwf)
+                                 commands = json.load(wf).get('extensions')
+                                 if commands:
+                                     commands = commands.get('python.commands')
+                         except Exception:
+                             logger.warning('Unable to read JSON metadata, so '
+                                            'cannot generate scripts')
+                     if commands:
+                         console_scripts = commands.get('wrap_console', {})
+                         gui_scripts = commands.get('wrap_gui', {})
+                         if console_scripts or gui_scripts:
+                             script_dir = paths.get('scripts', '')
+                             if not os.path.isdir(script_dir):
+                                 raise ValueError('Valid script path not '
+                                                  'specified')
+                             maker.target_dir = script_dir
+                             for k, v in console_scripts.items():
+                                 script = '%s = %s' % (k, v)
+                                 filenames = maker.make(script)
+                                 fileop.set_executable_mode(filenames)
+ 
+                             if gui_scripts:
+                                 options = {'gui': True}
+                                 for k, v in gui_scripts.items():
+                                     script = '%s = %s' % (k, v)
+                                     filenames = maker.make(script, options)
+                                     fileop.set_executable_mode(filenames)
+ 
+                     p = os.path.join(libdir, info_dir)
+                     dist = InstalledDistribution(p)
+ 
+                     # Write SHARED
+                     paths = dict(paths)     # don't change passed in dict
+                     del paths['purelib']
+                     del paths['platlib']
+                     paths['lib'] = libdir
+                     p = dist.write_shared_locations(paths, dry_run)
+                     if p:
+                         outfiles.append(p)
+ 
+                     # Write RECORD
+                     dist.write_installed_files(outfiles, paths['prefix'],
+                                                dry_run)
+                 return dist
+             except Exception:  # pragma: no cover
+                 logger.exception('installation failed.')
+                 fileop.rollback()
+                 raise
+             finally:
+                 shutil.rmtree(workdir)
+ 
+     def _get_dylib_cache(self):
+         global cache
+         if cache is None:
+             # Use native string to avoid issues on 2.x: see Python #20140.
+             base = os.path.join(get_cache_base(), str('dylib-cache'),
+                                 '%s.%s' % sys.version_info[:2])
+             cache = Cache(base)
+         return cache
+ 
+     def _get_extensions(self):
+         pathname = os.path.join(self.dirname, self.filename)
+         name_ver = '%s-%s' % (self.name, self.version)
+         info_dir = '%s.dist-info' % name_ver
+         arcname = posixpath.join(info_dir, 'EXTENSIONS')
+         wrapper = codecs.getreader('utf-8')
+         result = []
+         with ZipFile(pathname, 'r') as zf:
+             try:
+                 with zf.open(arcname) as bf:
+                     wf = wrapper(bf)
+                     extensions = json.load(wf)
+                     cache = self._get_dylib_cache()
+                     prefix = cache.prefix_to_dir(pathname)
+                     cache_base = os.path.join(cache.base, prefix)
+                     if not os.path.isdir(cache_base):
+                         os.makedirs(cache_base)
+                     for name, relpath in extensions.items():
+                         dest = os.path.join(cache_base, convert_path(relpath))
+                         if not os.path.exists(dest):
+                             extract = True
+                         else:
+                             file_time = os.stat(dest).st_mtime
+                             file_time = datetime.datetime.fromtimestamp(file_time)
+                             info = zf.getinfo(relpath)
+                             wheel_time = datetime.datetime(*info.date_time)
+                             extract = wheel_time > file_time
+                         if extract:
+                             zf.extract(relpath, cache_base)
+                         result.append((name, dest))
+             except KeyError:
+                 pass
+         return result
+ 
+     def is_compatible(self):
+         """
+         Determine if a wheel is compatible with the running system.
+         """
+         return is_compatible(self)
+ 
+     def is_mountable(self):
+         """
+         Determine if a wheel is asserted as mountable by its metadata.
+         """
+         return True  # for now - metadata details TBD
+ 
+     def mount(self, append=False):
+         pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+         if not self.is_compatible():
+             msg = 'Wheel %s not compatible with this Python.' % pathname
+             raise DistlibException(msg)
+         if not self.is_mountable():
+             msg = 'Wheel %s is marked as not mountable.' % pathname
+             raise DistlibException(msg)
+         if pathname in sys.path:
+             logger.debug('%s already in path', pathname)
+         else:
+             if append:
+                 sys.path.append(pathname)
+             else:
+                 sys.path.insert(0, pathname)
+             extensions = self._get_extensions()
+             if extensions:
+                 if _hook not in sys.meta_path:
+                     sys.meta_path.append(_hook)
+                 _hook.add(pathname, extensions)
+ 
+     def unmount(self):
+         pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+         if pathname not in sys.path:
+             logger.debug('%s not in path', pathname)
+         else:
+             sys.path.remove(pathname)
+             if pathname in _hook.impure_wheels:
+                 _hook.remove(pathname)
+             if not _hook.impure_wheels:
+                 if _hook in sys.meta_path:
+                     sys.meta_path.remove(_hook)
+ 
+     def verify(self):
+         pathname = os.path.join(self.dirname, self.filename)
+         name_ver = '%s-%s' % (self.name, self.version)
+         data_dir = '%s.data' % name_ver
+         info_dir = '%s.dist-info' % name_ver
+ 
+         metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
+         wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+         record_name = posixpath.join(info_dir, 'RECORD')
+ 
+         wrapper = codecs.getreader('utf-8')
+ 
+         with ZipFile(pathname, 'r') as zf:
+             with zf.open(wheel_metadata_name) as bwf:
+                 wf = wrapper(bwf)
+                 message = message_from_file(wf)
+             wv = message['Wheel-Version'].split('.', 1)
+             file_version = tuple([int(i) for i in wv])
+             # TODO version verification
+ 
+             records = {}
+             with zf.open(record_name) as bf:
+                 with CSVReader(stream=bf) as reader:
+                     for row in reader:
+                         p = row[0]
+                         records[p] = row
+ 
+             for zinfo in zf.infolist():
+                 arcname = zinfo.filename
+                 if isinstance(arcname, text_type):
+                     u_arcname = arcname
+                 else:
+                     u_arcname = arcname.decode('utf-8')
+                 # See issue #115: some wheels have .. in their entries, but
+                 # in the filename ... e.g. __main__..py ! So the check is
+                 # updated to look for .. in the directory portions
+                 p = u_arcname.split('/')
+                 if '..' in p:
+                     raise DistlibException('invalid entry in '
+                                            'wheel: %r' % u_arcname)
+ 
+                 if self.skip_entry(u_arcname):
+                     continue
+                 row = records[u_arcname]
+                 if row[2] and str(zinfo.file_size) != row[2]:
+                     raise DistlibException('size mismatch for '
+                                            '%s' % u_arcname)
+                 if row[1]:
+                     kind, value = row[1].split('=', 1)
+                     with zf.open(arcname) as bf:
+                         data = bf.read()
+                     _, digest = self.get_hash(data, kind)
+                     if digest != value:
+                         raise DistlibException('digest mismatch for '
+                                                '%s' % arcname)
+ 
+     def update(self, modifier, dest_dir=None, **kwargs):
+         """
+         Update the contents of a wheel in a generic way. The modifier should
+         be a callable which expects a dictionary argument: its keys are
+         archive-entry paths, and its values are absolute filesystem paths
+         where the contents of the corresponding archive entries can be found.
+         The modifier is free to change the contents of the files pointed to,
+         add new entries and remove entries, before returning. This method will
+         extract the entire contents of the wheel to a temporary location, call
+         the modifier, and then use the passed (and possibly updated)
+         dictionary to write a new wheel. If ``dest_dir`` is specified, the new
+         wheel is written there -- otherwise, the original wheel is overwritten.
+ 
+         The modifier should return True if it updated the wheel, else False.
+         This method returns the same value the modifier returns.
+         """
+ 
+         def get_version(path_map, info_dir):
+             version = path = None
+             key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
+             if key not in path_map:
+                 key = '%s/PKG-INFO' % info_dir
+             if key in path_map:
+                 path = path_map[key]
+                 version = Metadata(path=path).version
+             return version, path
+ 
+         def update_version(version, path):
+             updated = None
+             try:
+                 v = NormalizedVersion(version)
+                 i = version.find('-')
+                 if i < 0:
+                     updated = '%s+1' % version
+                 else:
+                     parts = [int(s) for s in version[i + 1:].split('.')]
+                     parts[-1] += 1
+                     updated = '%s+%s' % (version[:i],
+                                          '.'.join(str(i) for i in parts))
+             except UnsupportedVersionError:
+                 logger.debug('Cannot update non-compliant (PEP-440) '
+                              'version %r', version)
+             if updated:
+                 md = Metadata(path=path)
+                 md.version = updated
+                 legacy = path.endswith(LEGACY_METADATA_FILENAME)
+                 md.write(path=path, legacy=legacy)
+                 logger.debug('Version updated from %r to %r', version,
+                              updated)
+ 
+         pathname = os.path.join(self.dirname, self.filename)
+         name_ver = '%s-%s' % (self.name, self.version)
+         info_dir = '%s.dist-info' % name_ver
+         record_name = posixpath.join(info_dir, 'RECORD')
+         with tempdir() as workdir:
+             with ZipFile(pathname, 'r') as zf:
+                 path_map = {}
+                 for zinfo in zf.infolist():
+                     arcname = zinfo.filename
+                     if isinstance(arcname, text_type):
+                         u_arcname = arcname
+                     else:
+                         u_arcname = arcname.decode('utf-8')
+                     if u_arcname == record_name:
+                         continue
+                     if '..' in u_arcname:
+                         raise DistlibException('invalid entry in '
+                                                'wheel: %r' % u_arcname)
+                     zf.extract(zinfo, workdir)
+                     path = os.path.join(workdir, convert_path(u_arcname))
+                     path_map[u_arcname] = path
+ 
+             # Remember the version.
+             original_version, _ = get_version(path_map, info_dir)
+             # Files extracted. Call the modifier.
+             modified = modifier(path_map, **kwargs)
+             if modified:
+                 # Something changed - need to build a new wheel.
+                 current_version, path = get_version(path_map, info_dir)
+                 if current_version and (current_version == original_version):
+                     # Add or update local version to signify changes.
+                     update_version(current_version, path)
+                 # Decide where the new wheel goes.
+                 if dest_dir is None:
+                     fd, newpath = tempfile.mkstemp(suffix='.whl',
+                                                    prefix='wheel-update-',
+                                                    dir=workdir)
+                     os.close(fd)
+                 else:
+                     if not os.path.isdir(dest_dir):
+                         raise DistlibException('Not a directory: %r' % dest_dir)
+                     newpath = os.path.join(dest_dir, self.filename)
+                 archive_paths = list(path_map.items())
+                 distinfo = os.path.join(workdir, info_dir)
+                 info = distinfo, info_dir
+                 self.write_records(info, workdir, archive_paths)
+                 self.build_zip(newpath, archive_paths)
+                 if dest_dir is None:
+                     shutil.copyfile(newpath, pathname)
+         return modified
+ 
+ 
+ def _get_glibc_version():
+     import platform
+     ver = platform.libc_ver()
+     result = []
+     if ver[0] == 'glibc':
+         for s in ver[1].split('.'):
+             result.append(int(s) if s.isdigit() else 0)
+         result = tuple(result)
+     return result
+ 
+ 
+ def compatible_tags():
+     """
+     Return (pyver, abi, arch) tuples compatible with this Python.
+     """
+     versions = [VER_SUFFIX]
+     major = VER_SUFFIX[0]
+     for minor in range(sys.version_info[1] - 1, -1, -1):
+         versions.append(''.join([major, str(minor)]))
+ 
+     abis = []
+     for suffix, _, _ in imp.get_suffixes():
+         if suffix.startswith('.abi'):
+             abis.append(suffix.split('.', 2)[1])
+     abis.sort()
+     if ABI != 'none':
+         abis.insert(0, ABI)
+     abis.append('none')
+     result = []
+ 
+     arches = [ARCH]
+     if sys.platform == 'darwin':
+         m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
+         if m:
+             name, major, minor, arch = m.groups()
+             minor = int(minor)
+             matches = [arch]
+             if arch in ('i386', 'ppc'):
+                 matches.append('fat')
+             if arch in ('i386', 'ppc', 'x86_64'):
+                 matches.append('fat3')
+             if arch in ('ppc64', 'x86_64'):
+                 matches.append('fat64')
+             if arch in ('i386', 'x86_64'):
+                 matches.append('intel')
+             if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
+                 matches.append('universal')
+             while minor >= 0:
+                 for match in matches:
+                     s = '%s_%s_%s_%s' % (name, major, minor, match)
+                     if s != ARCH:   # already there
+                         arches.append(s)
+                 minor -= 1
+ 
+     # Most specific - our Python version, ABI and arch
+     for abi in abis:
+         for arch in arches:
+             result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
+             # manylinux
+             if abi != 'none' and sys.platform.startswith('linux'):
+                 arch = arch.replace('linux_', '')
+                 parts = _get_glibc_version()
+                 if len(parts) == 2:
+                     if parts >= (2, 5):
+                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
+                                        'manylinux1_%s' % arch))
+                     if parts >= (2, 12):
+                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
+                                        'manylinux2010_%s' % arch))
+                     if parts >= (2, 17):
+                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
+                                        'manylinux2014_%s' % arch))
+                     result.append((''.join((IMP_PREFIX, versions[0])), abi,
+                                    'manylinux_%s_%s_%s' % (parts[0], parts[1],
+                                                            arch)))
+ 
+     # where no ABI / arch dependency, but IMP_PREFIX dependency
+     for i, version in enumerate(versions):
+         result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
+         if i == 0:
+             result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
+ 
+     # no IMP_PREFIX, ABI or arch dependency
+     for i, version in enumerate(versions):
+         result.append((''.join(('py', version)), 'none', 'any'))
+         if i == 0:
+             result.append((''.join(('py', version[0])), 'none', 'any'))
+ 
+     return set(result)
+ 
+ 
+ COMPATIBLE_TAGS = compatible_tags()
+ 
+ del compatible_tags
+ 
+ 
+ def is_compatible(wheel, tags=None):
+     if not isinstance(wheel, Wheel):
+         wheel = Wheel(wheel)    # assume it's a filename
+     result = False
+     if tags is None:
+         tags = COMPATIBLE_TAGS
+     for ver, abi, arch in tags:
+         if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
+             result = True
+             break
+     return result
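
A short sketch of driving the Wheel class defined above; the wheel filename is hypothetical, and verify() needs the file to actually exist on disk:

    from pip._vendor.distlib.wheel import Wheel, is_compatible

    w = Wheel('demo-0.1-py3-none-any.whl')   # parsed via FILENAME_RE
    w.name, w.version, list(w.tags)          # ('demo', '0.1', [('py3', 'none', 'any')])
    is_compatible(w)                         # True on any CPython 3.x for py3-none-any
    w.verify()                               # re-checks sizes and digests against RECORD

For install(), the `maker` argument is typically a distlib ScriptMaker-style object, since the code reads maker.dry_run and calls maker.make().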
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__about__.py ADDED
@@ -0,0 +1,26 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+ 
+ __all__ = [
+     "__title__",
+     "__summary__",
+     "__uri__",
+     "__version__",
+     "__author__",
+     "__email__",
+     "__license__",
+     "__copyright__",
+ ]
+ 
+ __title__ = "packaging"
+ __summary__ = "Core utilities for Python packages"
+ __uri__ = "https://github.com/pypa/packaging"
+ 
+ __version__ = "21.3"
+ 
+ __author__ = "Donald Stufft and individual contributors"
+ __email__ = "donald@stufft.io"
+ 
+ __license__ = "BSD-2-Clause or Apache-2.0"
+ __copyright__ = "2014-2019 %s" % __author__
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__init__.py ADDED
@@ -0,0 +1,25 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+ 
+ from .__about__ import (
+     __author__,
+     __copyright__,
+     __email__,
+     __license__,
+     __summary__,
+     __title__,
+     __uri__,
+     __version__,
+ )
+ 
+ __all__ = [
+     "__title__",
+     "__summary__",
+     "__uri__",
+     "__version__",
+     "__author__",
+     "__email__",
+     "__license__",
+     "__copyright__",
+ ]
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-310.pyc ADDED
Binary file (586 Bytes). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (442 Bytes). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc ADDED
Binary file (7.3 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc ADDED
Binary file (4.61 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-310.pyc ADDED
Binary file (9.28 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-310.pyc ADDED
Binary file (3.97 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc ADDED
Binary file (21.5 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.57 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
venv/lib/python3.10/site-packages/pip/_vendor/packaging/_manylinux.py ADDED
@@ -0,0 +1,301 @@
+ import collections
+ import functools
+ import os
+ import re
+ import struct
+ import sys
+ import warnings
+ from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
+ 
+ 
+ # Python does not provide platform information at sufficient granularity to
+ # identify the architecture of the running executable in some cases, so we
+ # determine it dynamically by reading the information from the running
+ # process. This only applies on Linux, which uses the ELF format.
+ class _ELFFileHeader:
+     # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+     class _InvalidELFFileHeader(ValueError):
+         """
+         An invalid ELF file header was found.
+         """
+ 
+     ELF_MAGIC_NUMBER = 0x7F454C46
+     ELFCLASS32 = 1
+     ELFCLASS64 = 2
+     ELFDATA2LSB = 1
+     ELFDATA2MSB = 2
+     EM_386 = 3
+     EM_S390 = 22
+     EM_ARM = 40
+     EM_X86_64 = 62
+     EF_ARM_ABIMASK = 0xFF000000
+     EF_ARM_ABI_VER5 = 0x05000000
+     EF_ARM_ABI_FLOAT_HARD = 0x00000400
+ 
+     def __init__(self, file: IO[bytes]) -> None:
+         def unpack(fmt: str) -> int:
+             try:
+                 data = file.read(struct.calcsize(fmt))
+                 result: Tuple[int, ...] = struct.unpack(fmt, data)
+             except struct.error:
+                 raise _ELFFileHeader._InvalidELFFileHeader()
+             return result[0]
+ 
+         self.e_ident_magic = unpack(">I")
+         if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_class = unpack("B")
+         if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_data = unpack("B")
+         if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_version = unpack("B")
+         self.e_ident_osabi = unpack("B")
+         self.e_ident_abiversion = unpack("B")
+         self.e_ident_pad = file.read(7)
+         format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+         format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+         format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+         format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+         self.e_type = unpack(format_h)
+         self.e_machine = unpack(format_h)
+         self.e_version = unpack(format_i)
+         self.e_entry = unpack(format_p)
+         self.e_phoff = unpack(format_p)
+         self.e_shoff = unpack(format_p)
+         self.e_flags = unpack(format_i)
+         self.e_ehsize = unpack(format_h)
+         self.e_phentsize = unpack(format_h)
+         self.e_phnum = unpack(format_h)
+         self.e_shentsize = unpack(format_h)
+         self.e_shnum = unpack(format_h)
+         self.e_shstrndx = unpack(format_h)
+ 
+ 
+ def _get_elf_header() -> Optional[_ELFFileHeader]:
+     try:
+         with open(sys.executable, "rb") as f:
+             elf_header = _ELFFileHeader(f)
+     except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+         return None
+     return elf_header
+ 
+ 
+ def _is_linux_armhf() -> bool:
+     # hard-float ABI can be detected from the ELF header of the running
+     # process
+     # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+     elf_header = _get_elf_header()
+     if elf_header is None:
+         return False
+     result = elf_header.e_ident_class == elf_header.ELFCLASS32
+     result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+     result &= elf_header.e_machine == elf_header.EM_ARM
+     result &= (
+         elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+     ) == elf_header.EF_ARM_ABI_VER5
+     result &= (
+         elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+     ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+     return result
+ 
+ 
+ def _is_linux_i686() -> bool:
+     elf_header = _get_elf_header()
+     if elf_header is None:
+         return False
+     result = elf_header.e_ident_class == elf_header.ELFCLASS32
+     result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+     result &= elf_header.e_machine == elf_header.EM_386
+     return result
+ 
+ 
+ def _have_compatible_abi(arch: str) -> bool:
+     if arch == "armv7l":
+         return _is_linux_armhf()
+     if arch == "i686":
+         return _is_linux_i686()
+     return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+ 
+ 
+ # If glibc ever changes its major version, we need to know what the last
+ # minor version was, so we can build the complete list of all versions.
+ # For now, guess what the highest minor version might be, assume it will
+ # be 50 for testing. Once this actually happens, update the dictionary
+ # with the actual value.
+ _LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+ 
+ 
+ class _GLibCVersion(NamedTuple):
+     major: int
+     minor: int
+ 
+ 
+ def _glibc_version_string_confstr() -> Optional[str]:
+     """
+     Primary implementation of glibc_version_string using os.confstr.
+     """
+     # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+     # to be broken or missing. This strategy is used in the standard library
+     # platform module.
+     # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+     try:
+         # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+         version_string = os.confstr("CS_GNU_LIBC_VERSION")
+         assert version_string is not None
+         _, version = version_string.split()
+     except (AssertionError, AttributeError, OSError, ValueError):
+         # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+         return None
+     return version
+ 
+ 
+ def _glibc_version_string_ctypes() -> Optional[str]:
+     """
+     Fallback implementation of glibc_version_string using ctypes.
+     """
+     try:
+         import ctypes
+     except ImportError:
+         return None
+ 
+     # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+     # manpage says, "If filename is NULL, then the returned handle is for the
+     # main program". This way we can let the linker do the work to figure out
+     # which libc our process is actually using.
+     #
+     # We must also handle the special case where the executable is not a
+     # dynamically linked executable. This can occur when using musl libc,
+     # for example. In this situation, dlopen() will error, leading to an
+     # OSError. Interestingly, at least in the case of musl, there is no
+     # errno set on the OSError. The single string argument used to construct
+     # OSError comes from libc itself and is therefore not portable to
+     # hard code here. In any case, failure to call dlopen() means we
+     # can proceed, so we bail on our attempt.
+     try:
+         process_namespace = ctypes.CDLL(None)
+     except OSError:
+         return None
+ 
+     try:
+         gnu_get_libc_version = process_namespace.gnu_get_libc_version
+     except AttributeError:
+         # Symbol doesn't exist -> therefore, we are not linked to
+         # glibc.
+         return None
+ 
+     # Call gnu_get_libc_version, which returns a string like "2.5"
+     gnu_get_libc_version.restype = ctypes.c_char_p
+     version_str: str = gnu_get_libc_version()
+     # py2 / py3 compatibility:
+     if not isinstance(version_str, str):
+         version_str = version_str.decode("ascii")
+ 
+     return version_str
+ 
+ 
+ def _glibc_version_string() -> Optional[str]:
+     """Returns glibc version string, or None if not using glibc."""
+     return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+ 
+ 
+ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+     """Parse glibc version.
+ 
+     We use a regexp instead of str.split because we want to discard any
+     random junk that might come after the minor version -- this might happen
+     in patched/forked versions of glibc (e.g. Linaro's version of glibc
+     uses version strings like "2.20-2014.11"). See gh-3588.
+     """
+     m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+     if not m:
+         warnings.warn(
+             "Expected glibc version with 2 components major.minor,"
+             " got: %s" % version_str,
+             RuntimeWarning,
+         )
+         return -1, -1
+     return int(m.group("major")), int(m.group("minor"))
+ 
+ 
+ @functools.lru_cache()
+ def _get_glibc_version() -> Tuple[int, int]:
+     version_str = _glibc_version_string()
+     if version_str is None:
+         return (-1, -1)
+     return _parse_glibc_version(version_str)
+ 
+ 
+ # From PEP 513, PEP 600
+ def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+     sys_glibc = _get_glibc_version()
+     if sys_glibc < version:
+         return False
+     # Check for presence of _manylinux module.
+     try:
+         import _manylinux  # noqa
+     except ImportError:
+         return True
+     if hasattr(_manylinux, "manylinux_compatible"):
+         result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+         if result is not None:
+             return bool(result)
+         return True
+     if version == _GLibCVersion(2, 5):
+         if hasattr(_manylinux, "manylinux1_compatible"):
+             return bool(_manylinux.manylinux1_compatible)
+     if version == _GLibCVersion(2, 12):
+         if hasattr(_manylinux, "manylinux2010_compatible"):
+             return bool(_manylinux.manylinux2010_compatible)
+     if version == _GLibCVersion(2, 17):
+         if hasattr(_manylinux, "manylinux2014_compatible"):
+             return bool(_manylinux.manylinux2014_compatible)
+     return True
+ 
+ 
+ _LEGACY_MANYLINUX_MAP = {
+     # CentOS 7 w/ glibc 2.17 (PEP 599)
+     (2, 17): "manylinux2014",
+     # CentOS 6 w/ glibc 2.12 (PEP 571)
+     (2, 12): "manylinux2010",
+     # CentOS 5 w/ glibc 2.5 (PEP 513)
+     (2, 5): "manylinux1",
+ }
+ 
+ 
+ def platform_tags(linux: str, arch: str) -> Iterator[str]:
+     if not _have_compatible_abi(arch):
+         return
+     # Oldest glibc to be supported regardless of architecture is (2, 17).
+     too_old_glibc2 = _GLibCVersion(2, 16)
+     if arch in {"x86_64", "i686"}:
+         # On x86/i686 also oldest glibc to be supported is (2, 5).
+         too_old_glibc2 = _GLibCVersion(2, 4)
+     current_glibc = _GLibCVersion(*_get_glibc_version())
+     glibc_max_list = [current_glibc]
+     # We can assume compatibility across glibc major versions.
+     # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+     #
+     # Build a list of maximum glibc versions so that we can
+     # output the canonical list of all glibc from current_glibc
+     # down to too_old_glibc2, including all intermediary versions.
+     for glibc_major in range(current_glibc.major - 1, 1, -1):
+         glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+         glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+     for glibc_max in glibc_max_list:
+         if glibc_max.major == too_old_glibc2.major:
+             min_minor = too_old_glibc2.minor
+         else:
+             # For other glibc major versions oldest supported is (x, 0).
+             min_minor = -1
+         for glibc_minor in range(glibc_max.minor, min_minor, -1):
+             glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+             tag = "manylinux_{}_{}".format(*glibc_version)
+             if _is_compatible(tag, arch, glibc_version):
+                 yield linux.replace("linux", tag)
+             # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+             if glibc_version in _LEGACY_MANYLINUX_MAP:
+                 legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+                 if _is_compatible(legacy_tag, arch, glibc_version):
+                     yield linux.replace("linux", legacy_tag)
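
As a rough illustration of platform_tags() above: on a glibc 2.31 x86_64 host it would yield manylinux_2_31_x86_64 down through manylinux_2_5_x86_64, interleaved with the legacy aliases at glibc 2.17, 2.12 and 2.5. A sketch; actual output depends on the running system and any _manylinux override module:

    from pip._vendor.packaging._manylinux import platform_tags

    for tag in platform_tags("linux_x86_64", "x86_64"):
        print(tag)   # e.g. manylinux_2_31_x86_64 ... manylinux2014_x86_64 ... manylinux1_x86_64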
venv/lib/python3.10/site-packages/pip/_vendor/packaging/_musllinux.py ADDED
@@ -0,0 +1,136 @@
+ """PEP 656 support.
+ 
+ This module implements logic to detect if the currently running Python is
+ linked against musl, and what musl version is used.
+ """
+ 
+ import contextlib
+ import functools
+ import operator
+ import os
+ import re
+ import struct
+ import subprocess
+ import sys
+ from typing import IO, Iterator, NamedTuple, Optional, Tuple
+ 
+ 
+ def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
+     return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+ 
+ 
+ def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
+     """Detect musl libc location by parsing the Python executable.
+ 
+     Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+     ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+     """
+     f.seek(0)
+     try:
+         ident = _read_unpacked(f, "16B")
+     except struct.error:
+         return None
+     if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
+         return None
+     f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
+ 
+     try:
+         # e_fmt: Format for program header.
+         # p_fmt: Format for section header.
+         # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+         e_fmt, p_fmt, p_idx = {
+             1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
+             2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
+         }[ident[4]]
+     except KeyError:
+         return None
+     else:
+         p_get = operator.itemgetter(*p_idx)
+ 
+     # Find the interpreter section and return its content.
+     try:
+         _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
+     except struct.error:
+         return None
+     for i in range(e_phnum + 1):
+         f.seek(e_phoff + e_phentsize * i)
+         try:
+             p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
+         except struct.error:
+             return None
+         if p_type != 3:  # Not PT_INTERP.
+             continue
+         f.seek(p_offset)
+         interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
+         if "musl" not in interpreter:
+             return None
+         return interpreter
+     return None
+ 
+ 
+ class _MuslVersion(NamedTuple):
+     major: int
+     minor: int
+ 
+ 
+ def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+     lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+     if len(lines) < 2 or lines[0][:4] != "musl":
+         return None
+     m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+     if not m:
+         return None
+     return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
+ 
+ 
+ @functools.lru_cache()
+ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+     """Detect currently-running musl runtime version.
+ 
+     This is done by checking the specified executable's dynamic linking
+     information, and invoking the loader to parse its output for a version
+     string. If the loader is musl, the output would be something like::
+ 
+         musl libc (x86_64)
+         Version 1.2.2
+         Dynamic Program Loader
+     """
+     with contextlib.ExitStack() as stack:
+         try:
+             f = stack.enter_context(open(executable, "rb"))
+         except OSError:
+             return None
+         ld = _parse_ld_musl_from_elf(f)
+     if not ld:
+         return None
+     proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
+     return _parse_musl_version(proc.stderr)
+ 
+ 
+ def platform_tags(arch: str) -> Iterator[str]:
+     """Generate musllinux tags compatible with the current platform.
+ 
+     :param arch: Should be the part of platform tag after the ``linux_``
+         prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
+         prerequisite for the current platform to be musllinux-compatible.
+ 
+     :returns: An iterator of compatible musllinux tags.
+     """
+     sys_musl = _get_musl_version(sys.executable)
+     if sys_musl is None:  # Python not dynamically linked against musl.
+         return
+     for minor in range(sys_musl.minor, -1, -1):
+         yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+ 
+ 
+ if __name__ == "__main__":  # pragma: no cover
+     import sysconfig
+ 
+     plat = sysconfig.get_platform()
+     assert plat.startswith("linux-"), "not linux"
+ 
+     print("plat:", plat)
+     print("musl:", _get_musl_version(sys.executable))
+     print("tags:", end=" ")
+     for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
+         print(t, end="\n      ")
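
Since _get_musl_version() documents the loader's output format in its docstring, _parse_musl_version can be sanity-checked in isolation; the sample text below is the one from that docstring:

    from pip._vendor.packaging._musllinux import _MuslVersion, _parse_musl_version

    sample = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    assert _parse_musl_version(sample) == _MuslVersion(major=1, minor=2)
    assert _parse_musl_version("not a musl loader banner") is None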