index | package | name | docstring | code | signature |
---|---|---|---|---|---|
39,913 |
lerc._lerc
|
getLercBlobInfo_4D
| null |
def getLercBlobInfo_4D(lercBlob, printInfo = False):
return _getLercBlobInfo_Ext(lercBlob, 1, printInfo)
|
(lercBlob, printInfo=False)
|
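getLercBlobInfo_4D returns a flat tuple of header fields; the test() function further down unpacks 14 of them. A minimal usage sketch, assuming the top-level lerc package re-exports this helper and that 'data.lrc' is a placeholder path to an existing Lerc blob:

```python
# Inspect a Lerc blob header before decoding (field order as unpacked in test() below).
import lerc  # assumes the pip package exposes getLercBlobInfo_4D at the top level

blob = open('data.lrc', 'rb').read()  # placeholder path
(result, codecVersion, dataType, nValuesPerPixel, nCols, nRows, nBands, nValidPixels,
 blobSize, nMasks, zMin, zMax, maxZErrUsed, nUsesNoData) = lerc.getLercBlobInfo_4D(blob, printInfo=True)
if result == 0:
    print('codec', codecVersion, 'shape', nBands, 'x', nRows, 'x', nCols, 'x', nValuesPerPixel)
```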
39,914 |
lerc._lerc
|
getLercDataRanges
| null |
def getLercDataRanges(lercBlob, nDepth, nBands, printInfo = False):
global lercDll
nBytes = len(lercBlob)
len0 = nDepth * nBands;
cpBytes = ct.cast(lercBlob, ct.c_char_p)
mins = ct.create_string_buffer(len0 * 8)
maxs = ct.create_string_buffer(len0 * 8)
cpMins = ct.cast(mins, ct.POINTER(ct.c_double))
cpMaxs = ct.cast(maxs, ct.POINTER(ct.c_double))
if printInfo:
start = timer()
result = lercDll.lerc_getDataRanges(cpBytes, nBytes, nDepth, nBands, cpMins, cpMaxs)
if result > 0:
print('Error in getLercDataRanges(): lercDLL.lerc_getDataRanges() failed with error code = ', result)
return (result)
if printInfo:
end = timer()
print('time lerc_getDataRanges() = ', (end - start))
print('data ranges per band and depth:')
for i in range(nBands):
for j in range(nDepth):
print('band', i, 'depth', j, ': [', cpMins[i * nDepth + j], ',', cpMaxs[i * nDepth + j], ']')
npMins = np.frombuffer(mins, 'd')
npMaxs = np.frombuffer(maxs, 'd')
npMins.shape = (nBands, nDepth)
npMaxs.shape = (nBands, nDepth)
return (result, npMins, npMaxs)
|
(lercBlob, nDepth, nBands, printInfo=False)
|
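On success getLercDataRanges returns a (result, npMins, npMaxs) triple with both arrays shaped (nBands, nDepth); on failure it returns only the error code. A usage sketch, assuming nDepth and nBands are first read from the blob header as test() does below:

```python
# Read per-band/per-depth data ranges without decoding the pixel data.
import lerc  # assumes the helpers shown in this table are re-exported by the lerc package

blob = open('data.lrc', 'rb').read()  # placeholder path
info = lerc.getLercBlobInfo_4D(blob)
if info[0] == 0:
    nDepth, nBands = info[3], info[6]  # nValuesPerPixel and nBands header fields
    ret = lerc.getLercDataRanges(blob, nDepth, nBands)
    if isinstance(ret, tuple):         # only the bare error code comes back on failure
        result, npMins, npMaxs = ret
        print(npMins.shape, npMaxs.shape)  # both (nBands, nDepth)
```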
39,915 |
lerc._lerc
|
getLercDatatype
| null |
def getLercDatatype(npDtype):
switcher = {
np.dtype('b'): 0, # char or int8
np.dtype('B'): 1, # byte or uint8
np.dtype('h'): 2, # short or int16
np.dtype('H'): 3, # ushort or uint16
np.dtype('i'): 4, # int or int32
np.dtype('I'): 5, # uint or uint32
np.dtype('f'): 6, # float or float32
np.dtype('d'): 7 # double or float64
}
return switcher.get(npDtype, -1)
|
(npDtype)
|
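The switcher maps numpy dtypes to the integer type codes the Lerc C API expects, falling back to -1 for anything unsupported (note there is no 64-bit integer entry). A few quick checks of that mapping:

```python
# Spot-check the dtype-to-Lerc-type-code mapping defined above.
import numpy as np
from lerc._lerc import getLercDatatype  # import path as listed in this row

assert getLercDatatype(np.dtype('f')) == 6        # float32
assert getLercDatatype(np.dtype('H')) == 3        # uint16
assert getLercDatatype(np.dtype('float64')) == 7  # same dtype object as np.dtype('d')
assert getLercDatatype(np.dtype('int64')) == -1   # 64-bit integers are not supported
```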
39,916 |
lerc._lerc
|
getLercShape
| null |
def getLercShape(npArr, nValuesPerPixel):
nBands = 1
dim = npArr.ndim
npShape = npArr.shape
if nValuesPerPixel == 1:
if dim == 2:
(nRows, nCols) = npShape
elif dim == 3:
(nBands, nRows, nCols) = npShape # or band interleaved
elif nValuesPerPixel > 1:
if dim == 3:
(nRows, nCols, nValpp) = npShape # or pixel interleaved
elif dim == 4:
(nBands, nRows, nCols, nValpp) = npShape # 4D array
if nValpp != nValuesPerPixel:
return (0, 0, 0)
return (nBands, nRows, nCols)
|
(npArr, nValuesPerPixel)
|
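getLercShape infers (nBands, nRows, nCols) from the array rank together with the expected number of values per pixel, and returns (0, 0, 0) when the last axis does not match. A small illustration:

```python
# How array rank and nValuesPerPixel map onto (nBands, nRows, nCols).
import numpy as np
from lerc._lerc import getLercShape  # import path as listed in this row

assert getLercShape(np.zeros((10, 20), 'f'), 1) == (1, 10, 20)        # single band
assert getLercShape(np.zeros((3, 10, 20), 'f'), 1) == (3, 10, 20)     # band interleaved
assert getLercShape(np.zeros((10, 20, 4), 'f'), 4) == (1, 10, 20)     # pixel interleaved
assert getLercShape(np.zeros((3, 10, 20, 4), 'f'), 4) == (3, 10, 20)  # 4D array
assert getLercShape(np.zeros((10, 20, 4), 'f'), 2) == (0, 0, 0)       # depth mismatch
```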
39,920 |
lerc._lerc
|
test
| null |
def test():
fctErr = 'Error in test(): '
# data types supported by Lerc, all little endian byte order
# 'b', 'B', 'h', 'H', 'i', 'I', 'f', 'd'
print('\n -------- encode test 1 -------- ')
nBands = 1
nRows = 256
nCols = 256
nValuesPerPixel = 3 # values or array per pixel, could be RGB values or hyper spectral image
npArr = np.zeros((nRows, nCols, nValuesPerPixel), 'f', 'C') # data type float, C order
#npValidMask = np.full((nRows, nCols), True) # set all pixels valid
npValidMask = None # same as all pixels valid
maxZErr = 0.001
# fill it with something
for i in range(nRows):
for j in range(nCols):
for k in range(nValuesPerPixel):
npArr[i][j][k] = 0.001 * i * j + k
# call with buffer size 0 to only compute compressed size, optional
numBytesNeeded = 0
(result, numBytesNeeded) = encode(npArr, nValuesPerPixel, False, npValidMask,
maxZErr, numBytesNeeded, True)
if result > 0:
print(fctErr, 'encode() failed with error code = ', result)
return result
print('computed compressed size = ', numBytesNeeded)
# encode with numBytesNeeded from above or big enough estimate
(result, numBytesWritten, outBuffer) = encode(npArr, nValuesPerPixel, False, npValidMask,
maxZErr, numBytesNeeded, True)
if result > 0:
print(fctErr, 'encode() failed with error code = ', result)
return result
print('num bytes written to buffer = ', numBytesWritten)
# decode again
(result, npArrDec, npValidMaskDec) = decode(outBuffer, True)
if result > 0:
print(fctErr, 'decode() failed with error code = ', result)
return result
# evaluate the difference to orig
maxZErrFound = findMaxZError_4D(npArr, npArrDec, npValidMaskDec, nBands)
print('maxZErr found = ', maxZErrFound)
# find the range [zMin, zMax] in the numpy array
(zMin, zMax) = findDataRange(npArrDec, False, None, nBands, True)
print('data range found = ', zMin, zMax)
print('\n -------- encode test 2 -------- ')
nBands = 3
nRows = 256
nCols = 256
nValuesPerPixel = 1
npArr = np.zeros((nBands, nRows, nCols), 'f', 'C') # data type float, C order
npValidMask = np.full((nRows, nCols), True) # set all pixels valid
maxZErr = 0.001
# fill it with something
for m in range(nBands):
for i in range(nRows):
for j in range(nCols):
npArr[m][i][j] = 0.001 * i * j + m
# encode
nBytesBigEnough = npArr.nbytes * 2
(result, numBytesWritten, outBuffer) = encode(npArr, nValuesPerPixel, True, npValidMask,
maxZErr, nBytesBigEnough, True)
if result > 0:
print(fctErr, 'encode() failed with error code = ', result)
return result
print('num bytes written to buffer = ', numBytesWritten)
# decode again
(result, npArrDec, npValidMaskDec) = decode(outBuffer, True)
if result > 0:
print(fctErr, 'decode() failed with error code = ', result)
return result
# evaluate the difference to orig
maxZErrFound = findMaxZError_4D(npArr, npArrDec, npValidMaskDec, nBands)
print('maxZErr found = ', maxZErrFound)
# find the range [zMin, zMax]
(zMin, zMax) = findDataRange(npArrDec, True, npValidMaskDec, nBands, True)
print('data range found = ', zMin, zMax)
# save compressed Lerc blob to disk
#open('C:/temp/test_1_256_256_3_double.lrc', 'wb').write(outBuffer)
print('\n -------- encode test 3 -------- ')
# example for the new _4D() and _ma() functions in Lerc version 4.0
nBands = 3
nRows = 512
nCols = 512
nValuesPerPixel = 2 # values or array per pixel
npArr = np.zeros((nBands, nRows, nCols, nValuesPerPixel), 'f', 'C') # data type float, C order
npValidMask = None # same as all pixels valid, but we are going to add a noData value
maxZErr = 0.01
noDataVal = -9999.0
cntInvalid = 0
# fill it with something
start = timer()
for m in range(nBands):
for i in range(nRows):
for j in range(nCols):
for k in range(nValuesPerPixel):
z = 0.001 * i * j + 5 * m + k
# for all values at the same pixel, will get pushed into the byte mask
if j == i:
z = noDataVal
cntInvalid += 1
# create mixed case, decoded output will use noData for this one pixel in band 0
if (m == 0 and i == 5 and j == 7 and k == 0):
z = noDataVal
cntInvalid += 1
npArr[m][i][j][k] = z
end = timer()
print('time fill test array = ', (end - start))
# prepare noData arrays
npNoDataArr = np.zeros((nBands), 'd') # noData value is always type double
npNoDataArr.fill(noDataVal) # noData value can vary between bands
npmaNoData = np.ma.array(npNoDataArr, mask = False) # input has noData values in all 3 bands
# part A, using _4D() functions:
# encode using encode_4D()
start = timer()
nBytesBigEnough = npArr.nbytes * 2
(result, numBytesWritten, outBuffer) = encode_4D(npArr, nValuesPerPixel, npValidMask, maxZErr,
nBytesBigEnough, npmaNoData, False)
end = timer()
if result > 0:
print(fctErr, 'encode_4D() failed with error code = ', result)
return result
print('time encode_4D() = ', (end - start))
print('num bytes written to buffer = ', numBytesWritten)
# decode using decode_4D()
start = timer()
(result, npArrDec, npValidMaskDec, npmaNoDataDec) = decode_4D(outBuffer, False)
end = timer()
if result > 0:
print(fctErr, 'decode_4D() failed with error code = ', result)
return result
print('time decode_4D() = ', (end - start))
# evaluate the difference to orig
maxZErrFound = findMaxZError_4D(npArr, npArrDec, npValidMaskDec, nBands)
print('maxZErr found = ', maxZErrFound)
# find the range [zMin, zMax]
npmaArrDec = convert2ma(npArrDec, npValidMaskDec, nValuesPerPixel, nBands, npmaNoDataDec)
(zMin, zMax) = findDataRange_ma(npmaArrDec)
print('data range found = ', zMin, zMax)
# part B, using _ma() functions or masked arrays:
npmaArr = np.ma.array(npArr, mask = False)
start = timer()
(result, numBytesWritten2, outBuffer2) = encode_ma(npmaArr, nValuesPerPixel, maxZErr,
nBytesBigEnough, npmaNoData, False)
end = timer()
if result > 0:
print(fctErr, 'encode_ma() failed with error code = ', result)
return result
print('time encode_ma() = ', (end - start))
print('num bytes written to buffer = ', numBytesWritten2)
print('lerc blob size from encode_4D() = ', numBytesWritten,
', and from encode_ma() = ', numBytesWritten2)
#decode using decode_ma()
start = timer()
(result, npmaArrDec, nDepthDec, npmaNoDataDec2) = decode_ma(outBuffer2, False)
end = timer()
if result > 0:
print(fctErr, 'decode_ma() failed with error code = ', result)
return result
print('time decode_ma() = ', (end - start))
# find the range [zMin, zMax], again
(zMin, zMax) = findDataRange_ma(npmaArrDec)
print('data range found for ma = ', zMin, zMax)
print('number of invalid values, orig = ', cntInvalid, ', in masked array = ',
np.ma.count_masked(npmaArrDec))
if False:
print('\n -------- decode test on ~100 different Lerc blobs -------- ')
folder = 'D:/GitHub/LercOpenSource_v2.5/testData/'
listFile = folder + '_list.txt'
with open(listFile, 'r') as f:
lines = f.readlines()
f.close()
skipFirstLine = True
for line in lines:
if skipFirstLine:
skipFirstLine = False
continue
fn = folder + line.rstrip()
bytesRead = open(fn, 'rb').read()
# read the blob header, optional
(result, codecVersion, dataType, nValuesPerPixel, nCols, nRows, nBands, nValidPixels,
blobSize, nMasks, zMin, zMax, maxZErrUsed, nUsesNoData) = getLercBlobInfo_4D(bytesRead, False)
if result > 0:
print(fctErr, 'getLercBlobInfo_4D() failed with error code = ', result)
return result
# read the data ranges, optional
if nUsesNoData == 0:
(result, npMins, npMaxs) = getLercDataRanges(bytesRead, nValuesPerPixel, nBands, False)
if result > 0:
print(fctErr, 'getLercDataRanges() failed with error code = ', result)
return result
# decode
(result, npmaArr, nDepth, npmaNoData) = decode_ma(bytesRead, False)
if result > 0:
print(fctErr, 'decode_ma() failed with error code = ', result)
return result
# find the range [zMin, zMax]
(zMin, zMax) = findDataRange_ma(npmaArr)
print(f'codec {codecVersion:1}, dt {dataType:1}, nDepth {nValuesPerPixel:3}, nCols {nCols:5},',
f'nRows {nRows:5}, nBands {nBands:3}, nMasks {nMasks:3}, maxZErr {maxZErrUsed:.6f},',
f'nUsesNoData {nUsesNoData:3}, zMin {zMin:9.3f}, zMax {zMax:14.3f}, ', line.rstrip())
return result
|
()
|
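The round trip exercised by test() reduces to: encode a numpy array with a maximum tolerated error, decode the blob, and compare. A condensed sketch of that flow, mirroring the argument order used in encode test 1 above and assuming the lerc package re-exports encode(), decode() and findMaxZError_4D():

```python
# Condensed encode/decode round trip, distilled from test() above.
import numpy as np
import lerc  # assumed to re-export encode, decode, findMaxZError_4D

nRows, nCols, nValuesPerPixel = 128, 128, 1
npArr = np.fromfunction(lambda i, j: 0.001 * i * j, (nRows, nCols), dtype='f')
maxZErr = 0.001

# nBytesHint can be 0 first to have Lerc report the needed buffer size (see test 1)
result, nBytesWritten, outBuffer = lerc.encode(npArr, nValuesPerPixel, False, None,
                                               maxZErr, npArr.nbytes * 2, False)
assert result == 0

result, npArrDec, npValidMaskDec = lerc.decode(outBuffer, False)
assert result == 0
print('maxZErr found =', lerc.findMaxZError_4D(npArr, npArrDec, npValidMaskDec, 1))
```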
39,921 |
tldextract.tldextract
|
TLDExtract
|
A callable for extracting subdomain, domain, and suffix components from a URL.
|
class TLDExtract:
"""A callable for extracting, subdomain, domain, and suffix components from a URL."""
# TODO: too-many-arguments
def __init__(
self,
cache_dir: str | None = get_cache_dir(),
suffix_list_urls: Sequence[str] = PUBLIC_SUFFIX_LIST_URLS,
fallback_to_snapshot: bool = True,
include_psl_private_domains: bool = False,
extra_suffixes: Sequence[str] = (),
cache_fetch_timeout: str | float | None = CACHE_TIMEOUT,
) -> None:
"""Construct a callable for extracting subdomain, domain, and suffix components from a URL.
Upon calling it, it first checks for a JSON in `cache_dir`. By default,
the `cache_dir` will live in the tldextract directory. You can disable
the caching functionality of this module by setting `cache_dir` to `None`.
If the cached version does not exist (such as on the first run), HTTP request the URLs in
`suffix_list_urls` in order, until one returns public suffix list data. To disable HTTP
requests, set this to an empty sequence.
The default list of URLs points to the latest version of the Mozilla Public Suffix List and
its mirror, but any similar document could be specified. Local files can be specified by
using the `file://` protocol. (See `urllib2` documentation.)
If there is no cached version loaded and no data is found from the `suffix_list_urls`,
the module will fall back to the included TLD set snapshot. If you do not want
this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
raised instead.
The Public Suffix List includes a list of "private domains" as TLDs,
such as blogspot.com. These do not fit `tldextract`'s definition of a
suffix, so these domains are excluded by default. If you'd like them
included instead, set `include_psl_private_domains` to True.
You can pass additional suffixes in the `extra_suffixes` argument without changing the suffix list URLs.
cache_fetch_timeout is passed unmodified to the underlying request object
per the requests documentation here:
http://docs.python-requests.org/en/master/user/advanced/#timeouts
cache_fetch_timeout can also be set to a single value with the
environment variable TLDEXTRACT_CACHE_TIMEOUT, like so:
TLDEXTRACT_CACHE_TIMEOUT="1.2"
When set this way, the same timeout value will be used for both connect
and read timeouts
"""
suffix_list_urls = suffix_list_urls or ()
self.suffix_list_urls = tuple(
url.strip() for url in suffix_list_urls if url.strip()
)
self.fallback_to_snapshot = fallback_to_snapshot
if not (self.suffix_list_urls or cache_dir or self.fallback_to_snapshot):
raise ValueError(
"The arguments you have provided disable all ways for tldextract "
"to obtain data. Please provide a suffix list data, a cache_dir, "
"or set `fallback_to_snapshot` to `True`."
)
self.include_psl_private_domains = include_psl_private_domains
self.extra_suffixes = extra_suffixes
self._extractor: _PublicSuffixListTLDExtractor | None = None
self.cache_fetch_timeout = (
float(cache_fetch_timeout)
if isinstance(cache_fetch_timeout, str)
else cache_fetch_timeout
)
self._cache = DiskCache(cache_dir)
def __call__(
self,
url: str,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Alias for `extract_str`."""
return self.extract_str(url, include_psl_private_domains, session=session)
def extract_str(
self,
url: str,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Take a string URL and splits it into its subdomain, domain, and suffix components.
I.e. its effective TLD, gTLD, ccTLD, etc. components.
>>> extractor = TLDExtract()
>>> extractor.extract_str('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_str('http://forums.bbc.co.uk/')
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
Allows configuring the HTTP request via the optional `session`
parameter. For example, if you need to use a HTTP proxy. See also
`requests.Session`.
>>> import requests
>>> session = requests.Session()
>>> # customize your session here
>>> with session:
... extractor.extract_str("http://forums.news.cnn.com/", session=session)
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
"""
return self._extract_netloc(
lenient_netloc(url), include_psl_private_domains, session=session
)
def extract_urllib(
self,
url: urllib.parse.ParseResult | urllib.parse.SplitResult,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Take the output of urllib.parse URL parsing methods and further splits the parsed URL.
Splits the parsed URL into its subdomain, domain, and suffix
components, i.e. its effective TLD, gTLD, ccTLD, etc. components.
This method is like `extract_str` but faster, as the string's domain
name has already been parsed.
>>> extractor = TLDExtract()
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.news.cnn.com/'))
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.bbc.co.uk/'))
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
"""
return self._extract_netloc(
url.netloc, include_psl_private_domains, session=session
)
def _extract_netloc(
self,
netloc: str,
include_psl_private_domains: bool | None,
session: requests.Session | None = None,
) -> ExtractResult:
netloc_with_ascii_dots = (
netloc.replace("\u3002", "\u002e")
.replace("\uff0e", "\u002e")
.replace("\uff61", "\u002e")
)
min_num_ipv6_chars = 4
if (
len(netloc_with_ascii_dots) >= min_num_ipv6_chars
and netloc_with_ascii_dots[0] == "["
and netloc_with_ascii_dots[-1] == "]"
):
if looks_like_ipv6(netloc_with_ascii_dots[1:-1]):
return ExtractResult("", netloc_with_ascii_dots, "", is_private=False)
labels = netloc_with_ascii_dots.split(".")
suffix_index, is_private = self._get_tld_extractor(
session=session
).suffix_index(labels, include_psl_private_domains=include_psl_private_domains)
num_ipv4_labels = 4
if suffix_index == len(labels) == num_ipv4_labels and looks_like_ip(
netloc_with_ascii_dots
):
return ExtractResult("", netloc_with_ascii_dots, "", is_private)
suffix = ".".join(labels[suffix_index:]) if suffix_index != len(labels) else ""
subdomain = ".".join(labels[: suffix_index - 1]) if suffix_index >= 2 else ""
domain = labels[suffix_index - 1] if suffix_index else ""
return ExtractResult(subdomain, domain, suffix, is_private)
def update(
self, fetch_now: bool = False, session: requests.Session | None = None
) -> None:
"""Force fetch the latest suffix list definitions."""
self._extractor = None
self._cache.clear()
if fetch_now:
self._get_tld_extractor(session=session)
@property
def tlds(self, session: requests.Session | None = None) -> list[str]:
"""Returns the list of tld's used by default.
This will vary based on `include_psl_private_domains` and `extra_suffixes`
"""
return list(self._get_tld_extractor(session=session).tlds())
def _get_tld_extractor(
self, session: requests.Session | None = None
) -> _PublicSuffixListTLDExtractor:
"""Get or compute this object's TLDExtractor.
Looks up the TLDExtractor in roughly the following order, based on the
settings passed to __init__:
1. Memoized on `self`
2. Local system _cache file
3. Remote PSL, over HTTP
4. Bundled PSL snapshot file
"""
if self._extractor:
return self._extractor
public_tlds, private_tlds = get_suffix_lists(
cache=self._cache,
urls=self.suffix_list_urls,
cache_fetch_timeout=self.cache_fetch_timeout,
fallback_to_snapshot=self.fallback_to_snapshot,
session=session,
)
if not any([public_tlds, private_tlds, self.extra_suffixes]):
raise ValueError("No tlds set. Cannot proceed without tlds.")
self._extractor = _PublicSuffixListTLDExtractor(
public_tlds=public_tlds,
private_tlds=private_tlds,
extra_tlds=list(self.extra_suffixes),
include_psl_private_domains=self.include_psl_private_domains,
)
return self._extractor
|
(cache_dir: 'str | None' = '/root/.cache/python-tldextract/3.10.14.final__local__ecb11d__tldextract-5.1.2', suffix_list_urls: 'Sequence[str]' = ('https://publicsuffix.org/list/public_suffix_list.dat', 'https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat'), fallback_to_snapshot: 'bool' = True, include_psl_private_domains: 'bool' = False, extra_suffixes: 'Sequence[str]' = (), cache_fetch_timeout: 'str | float | None' = None) -> 'None'
|
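As the constructor docstring notes, HTTP fetching can be disabled by passing an empty `suffix_list_urls`, in which case the bundled snapshot is used; `cache_dir=None` additionally disables caching. A small sketch of such an offline extractor:

```python
# Offline extractor: no cache directory, no HTTP fetch, bundled PSL snapshot only.
import tldextract

no_fetch_extract = tldextract.TLDExtract(cache_dir=None, suffix_list_urls=())
print(no_fetch_extract('http://forums.news.cnn.com/'))
# ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
```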
39,922 |
tldextract.tldextract
|
__call__
|
Alias for `extract_str`.
|
def __call__(
self,
url: str,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Alias for `extract_str`."""
return self.extract_str(url, include_psl_private_domains, session=session)
|
(self, url: str, include_psl_private_domains: Optional[bool] = None, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract.ExtractResult
|
39,923 |
tldextract.tldextract
|
__init__
|
Construct a callable for extracting subdomain, domain, and suffix components from a URL.
Upon calling it, it first checks for a JSON in `cache_dir`. By default,
the `cache_dir` will live in the tldextract directory. You can disable
the caching functionality of this module by setting `cache_dir` to `None`.
If the cached version does not exist (such as on the first run), HTTP request the URLs in
`suffix_list_urls` in order, until one returns public suffix list data. To disable HTTP
requests, set this to an empty sequence.
The default list of URLs points to the latest version of the Mozilla Public Suffix List and
its mirror, but any similar document could be specified. Local files can be specified by
using the `file://` protocol. (See `urllib2` documentation.)
If there is no cached version loaded and no data is found from the `suffix_list_urls`,
the module will fall back to the included TLD set snapshot. If you do not want
this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
raised instead.
The Public Suffix List includes a list of "private domains" as TLDs,
such as blogspot.com. These do not fit `tldextract`'s definition of a
suffix, so these domains are excluded by default. If you'd like them
included instead, set `include_psl_private_domains` to True.
You can pass additional suffixes in the `extra_suffixes` argument without changing the suffix list URLs.
cache_fetch_timeout is passed unmodified to the underlying request object
per the requests documentation here:
http://docs.python-requests.org/en/master/user/advanced/#timeouts
cache_fetch_timeout can also be set to a single value with the
environment variable TLDEXTRACT_CACHE_TIMEOUT, like so:
TLDEXTRACT_CACHE_TIMEOUT="1.2"
When set this way, the same timeout value will be used for both connect
and read timeouts
|
def __init__(
self,
cache_dir: str | None = get_cache_dir(),
suffix_list_urls: Sequence[str] = PUBLIC_SUFFIX_LIST_URLS,
fallback_to_snapshot: bool = True,
include_psl_private_domains: bool = False,
extra_suffixes: Sequence[str] = (),
cache_fetch_timeout: str | float | None = CACHE_TIMEOUT,
) -> None:
"""Construct a callable for extracting subdomain, domain, and suffix components from a URL.
Upon calling it, it first checks for a JSON in `cache_dir`. By default,
the `cache_dir` will live in the tldextract directory. You can disable
the caching functionality of this module by setting `cache_dir` to `None`.
If the cached version does not exist (such as on the first run), HTTP request the URLs in
`suffix_list_urls` in order, until one returns public suffix list data. To disable HTTP
requests, set this to an empty sequence.
The default list of URLs points to the latest version of the Mozilla Public Suffix List and
its mirror, but any similar document could be specified. Local files can be specified by
using the `file://` protocol. (See `urllib2` documentation.)
If there is no cached version loaded and no data is found from the `suffix_list_urls`,
the module will fall back to the included TLD set snapshot. If you do not want
this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
raised instead.
The Public Suffix List includes a list of "private domains" as TLDs,
such as blogspot.com. These do not fit `tldextract`'s definition of a
suffix, so these domains are excluded by default. If you'd like them
included instead, set `include_psl_private_domains` to True.
You can pass additional suffixes in the `extra_suffixes` argument without changing the suffix list URLs.
cache_fetch_timeout is passed unmodified to the underlying request object
per the requests documentation here:
http://docs.python-requests.org/en/master/user/advanced/#timeouts
cache_fetch_timeout can also be set to a single value with the
environment variable TLDEXTRACT_CACHE_TIMEOUT, like so:
TLDEXTRACT_CACHE_TIMEOUT="1.2"
When set this way, the same timeout value will be used for both connect
and read timeouts
"""
suffix_list_urls = suffix_list_urls or ()
self.suffix_list_urls = tuple(
url.strip() for url in suffix_list_urls if url.strip()
)
self.fallback_to_snapshot = fallback_to_snapshot
if not (self.suffix_list_urls or cache_dir or self.fallback_to_snapshot):
raise ValueError(
"The arguments you have provided disable all ways for tldextract "
"to obtain data. Please provide a suffix list data, a cache_dir, "
"or set `fallback_to_snapshot` to `True`."
)
self.include_psl_private_domains = include_psl_private_domains
self.extra_suffixes = extra_suffixes
self._extractor: _PublicSuffixListTLDExtractor | None = None
self.cache_fetch_timeout = (
float(cache_fetch_timeout)
if isinstance(cache_fetch_timeout, str)
else cache_fetch_timeout
)
self._cache = DiskCache(cache_dir)
|
(self, cache_dir: str | None = '/root/.cache/python-tldextract/3.10.14.final__local__ecb11d__tldextract-5.1.2', suffix_list_urls: collections.abc.Sequence[str] = ('https://publicsuffix.org/list/public_suffix_list.dat', 'https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat'), fallback_to_snapshot: bool = True, include_psl_private_domains: bool = False, extra_suffixes: collections.abc.Sequence[str] = (), cache_fetch_timeout: Union[str, float, NoneType] = None) -> NoneType
|
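The docstring above notes that `cache_fetch_timeout` may arrive as a string (it is coerced with float() in the constructor), typically via the TLDEXTRACT_CACHE_TIMEOUT environment variable. Since the default appears to be captured when the module is imported, a sketch that sets the variable first:

```python
# Two ways to bound the suffix-list fetch timeout.
import os

os.environ["TLDEXTRACT_CACHE_TIMEOUT"] = "1.2"  # set before importing tldextract

import tldextract

extractor_env = tldextract.TLDExtract()                          # picks up "1.2" from the environment
extractor_arg = tldextract.TLDExtract(cache_fetch_timeout=1.2)   # same effect, passed explicitly
```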
39,924 |
tldextract.tldextract
|
_extract_netloc
| null |
def _extract_netloc(
self,
netloc: str,
include_psl_private_domains: bool | None,
session: requests.Session | None = None,
) -> ExtractResult:
netloc_with_ascii_dots = (
netloc.replace("\u3002", "\u002e")
.replace("\uff0e", "\u002e")
.replace("\uff61", "\u002e")
)
min_num_ipv6_chars = 4
if (
len(netloc_with_ascii_dots) >= min_num_ipv6_chars
and netloc_with_ascii_dots[0] == "["
and netloc_with_ascii_dots[-1] == "]"
):
if looks_like_ipv6(netloc_with_ascii_dots[1:-1]):
return ExtractResult("", netloc_with_ascii_dots, "", is_private=False)
labels = netloc_with_ascii_dots.split(".")
suffix_index, is_private = self._get_tld_extractor(
session=session
).suffix_index(labels, include_psl_private_domains=include_psl_private_domains)
num_ipv4_labels = 4
if suffix_index == len(labels) == num_ipv4_labels and looks_like_ip(
netloc_with_ascii_dots
):
return ExtractResult("", netloc_with_ascii_dots, "", is_private)
suffix = ".".join(labels[suffix_index:]) if suffix_index != len(labels) else ""
subdomain = ".".join(labels[: suffix_index - 1]) if suffix_index >= 2 else ""
domain = labels[suffix_index - 1] if suffix_index else ""
return ExtractResult(subdomain, domain, suffix, is_private)
|
(self, netloc: str, include_psl_private_domains: bool | None, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract.ExtractResult
|
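_extract_netloc normalizes the unicode dot variants, short-circuits bracketed IPv6 hosts and four-label IPv4 hosts by returning the whole host in the `domain` slot, and otherwise splits the labels at the suffix index. The effect is visible through the public API:

```python
# IP literals fall through _extract_netloc: the whole host lands in `domain`.
import tldextract

print(tldextract.extract('http://127.0.0.1:8080/deployed/'))
# ExtractResult(subdomain='', domain='127.0.0.1', suffix='', is_private=False)
print(tldextract.extract('https://[::1]/'))
# ExtractResult(subdomain='', domain='[::1]', suffix='', is_private=False)
```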
39,925 |
tldextract.tldextract
|
_get_tld_extractor
|
Get or compute this object's TLDExtractor.
Looks up the TLDExtractor in roughly the following order, based on the
settings passed to __init__:
1. Memoized on `self`
2. Local system _cache file
3. Remote PSL, over HTTP
4. Bundled PSL snapshot file
|
def _get_tld_extractor(
self, session: requests.Session | None = None
) -> _PublicSuffixListTLDExtractor:
"""Get or compute this object's TLDExtractor.
Looks up the TLDExtractor in roughly the following order, based on the
settings passed to __init__:
1. Memoized on `self`
2. Local system _cache file
3. Remote PSL, over HTTP
4. Bundled PSL snapshot file
"""
if self._extractor:
return self._extractor
public_tlds, private_tlds = get_suffix_lists(
cache=self._cache,
urls=self.suffix_list_urls,
cache_fetch_timeout=self.cache_fetch_timeout,
fallback_to_snapshot=self.fallback_to_snapshot,
session=session,
)
if not any([public_tlds, private_tlds, self.extra_suffixes]):
raise ValueError("No tlds set. Cannot proceed without tlds.")
self._extractor = _PublicSuffixListTLDExtractor(
public_tlds=public_tlds,
private_tlds=private_tlds,
extra_tlds=list(self.extra_suffixes),
include_psl_private_domains=self.include_psl_private_domains,
)
return self._extractor
|
(self, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract._PublicSuffixListTLDExtractor
|
39,926 |
tldextract.tldextract
|
extract_str
|
Take a string URL and split it into its subdomain, domain, and suffix components.
I.e. its effective TLD, gTLD, ccTLD, etc. components.
>>> extractor = TLDExtract()
>>> extractor.extract_str('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_str('http://forums.bbc.co.uk/')
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
Allows configuring the HTTP request via the optional `session`
parameter. For example, if you need to use a HTTP proxy. See also
`requests.Session`.
>>> import requests
>>> session = requests.Session()
>>> # customize your session here
>>> with session:
... extractor.extract_str("http://forums.news.cnn.com/", session=session)
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
|
def extract_str(
self,
url: str,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Take a string URL and splits it into its subdomain, domain, and suffix components.
I.e. its effective TLD, gTLD, ccTLD, etc. components.
>>> extractor = TLDExtract()
>>> extractor.extract_str('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_str('http://forums.bbc.co.uk/')
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
Allows configuring the HTTP request via the optional `session`
parameter. For example, if you need to use a HTTP proxy. See also
`requests.Session`.
>>> import requests
>>> session = requests.Session()
>>> # customize your session here
>>> with session:
... extractor.extract_str("http://forums.news.cnn.com/", session=session)
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
"""
return self._extract_netloc(
lenient_netloc(url), include_psl_private_domains, session=session
)
|
(self, url: str, include_psl_private_domains: Optional[bool] = None, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract.ExtractResult
|
39,927 |
tldextract.tldextract
|
extract_urllib
|
Take the output of urllib.parse URL parsing methods and further split the parsed URL.
Splits the parsed URL into its subdomain, domain, and suffix
components, i.e. its effective TLD, gTLD, ccTLD, etc. components.
This method is like `extract_str` but faster, as the string's domain
name has already been parsed.
>>> extractor = TLDExtract()
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.news.cnn.com/'))
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.bbc.co.uk/'))
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
|
def extract_urllib(
self,
url: urllib.parse.ParseResult | urllib.parse.SplitResult,
include_psl_private_domains: bool | None = None,
session: requests.Session | None = None,
) -> ExtractResult:
"""Take the output of urllib.parse URL parsing methods and further splits the parsed URL.
Splits the parsed URL into its subdomain, domain, and suffix
components, i.e. its effective TLD, gTLD, ccTLD, etc. components.
This method is like `extract_str` but faster, as the string's domain
name has already been parsed.
>>> extractor = TLDExtract()
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.news.cnn.com/'))
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com', is_private=False)
>>> extractor.extract_urllib(urllib.parse.urlsplit('http://forums.bbc.co.uk/'))
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk', is_private=False)
"""
return self._extract_netloc(
url.netloc, include_psl_private_domains, session=session
)
|
(self, url: urllib.parse.ParseResult | urllib.parse.SplitResult, include_psl_private_domains: Optional[bool] = None, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract.ExtractResult
|
39,928 |
tldextract.tldextract
|
update
|
Force fetch the latest suffix list definitions.
|
def update(
self, fetch_now: bool = False, session: requests.Session | None = None
) -> None:
"""Force fetch the latest suffix list definitions."""
self._extractor = None
self._cache.clear()
if fetch_now:
self._get_tld_extractor(session=session)
|
(self, fetch_now: bool = False, session: Optional[requests.sessions.Session] = None) -> NoneType
|
39,931 |
tldextract.tldextract
|
__call__
|
Alias for `extract_str`.
|
@wraps(TLD_EXTRACTOR.__call__)
def extract( # noqa: D103
url: str,
include_psl_private_domains: bool | None = False,
session: requests.Session | None = None,
) -> ExtractResult:
return TLD_EXTRACTOR(
url, include_psl_private_domains=include_psl_private_domains, session=session
)
|
(url: str, include_psl_private_domains: bool | None = None, session: Optional[requests.sessions.Session] = None) -> tldextract.tldextract.ExtractResult
|
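The module-level `extract` is a thin wrapper around a shared TLD_EXTRACTOR instance, so the common case needs no explicit object:

```python
# Module-level helper; equivalent to calling the shared TLD_EXTRACTOR instance.
import tldextract

ext = tldextract.extract('http://forums.bbc.co.uk/')
print(ext.subdomain, ext.domain, ext.suffix)  # forums bbc co.uk
print(f'{ext.domain}.{ext.suffix}')           # bbc.co.uk, the registered domain
```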
39,935 |
s3path.old_versions
|
PureS3Path
|
PurePath subclass for AWS S3 service.
S3 is not a file-system but we can look at it like a POSIX system.
|
class PureS3Path(PurePath):
"""
PurePath subclass for AWS S3 service.
S3 is not a file-system but we can look at it like a POSIX system.
"""
_flavour = _s3_flavour
__slots__ = ()
@classmethod
def from_uri(cls, uri: str):
"""
from_uri class method creates a class instance from a uri
>> from s3path import PureS3Path
>> PureS3Path.from_uri('s3://<bucket>/<key>')
<< PureS3Path('/<bucket>/<key>')
"""
if not uri.startswith('s3://'):
raise ValueError('Provided uri seems to be no S3 URI!')
unquoted_uri = unquote(uri)
return cls(unquoted_uri[4:])
@property
def bucket(self) -> str:
"""
The AWS S3 Bucket name, or ''
"""
self._absolute_path_validation()
with suppress(ValueError):
_, bucket, *_ = self.parts
return bucket
return ''
@property
def is_bucket(self) -> bool:
"""
Check if Path is a bucket
"""
return self.is_absolute() and self == PureS3Path(f"/{self.bucket}")
@property
def key(self) -> str:
"""
The AWS S3 Key name, or ''
"""
self._absolute_path_validation()
key = self._flavour.sep.join(self.parts[2:])
return key
@classmethod
def from_bucket_key(cls, bucket: str, key: str):
"""
from_bucket_key class method creates a class instance from a bucket, key pair
>> from s3path import PureS3Path
>> PureS3Path.from_bucket_key(bucket='<bucket>', key='<key>')
<< PureS3Path('/<bucket>/<key>')
"""
bucket = cls(cls._flavour.sep, bucket)
if len(bucket.parts) != 2:
raise ValueError(f'bucket argument contains more then one path element: {bucket}')
key = cls(key)
if key.is_absolute():
key = key.relative_to('/')
return bucket / key
def as_uri(self) -> str:
"""
Return the path as a 's3' URI.
"""
return super().as_uri()
def _absolute_path_validation(self):
if not self.is_absolute():
raise ValueError('relative path have no bucket, key specification')
|
(*args)
|
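PureS3Path behaves like a PurePosixPath whose first component is the bucket and whose remainder is the key, as the `>>` examples in the docstrings show. A short sketch of pure (no AWS calls) path manipulation, assuming the class is exported from the s3path package:

```python
# Pure path manipulation with PureS3Path; nothing here talks to AWS.
from s3path import PureS3Path

p = PureS3Path.from_uri('s3://my-bucket/some/prefix/data.csv')
print(p)           # /my-bucket/some/prefix/data.csv
print(p.bucket)    # my-bucket
print(p.key)       # some/prefix/data.csv
print(p.as_uri())  # s3://my-bucket/some/prefix/data.csv

q = PureS3Path.from_bucket_key(bucket='my-bucket', key='some/prefix/data.csv')
print(p == q)      # True
```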
39,950 |
s3path.old_versions
|
_absolute_path_validation
| null |
def _absolute_path_validation(self):
if not self.is_absolute():
raise ValueError('relative path have no bucket, key specification')
|
(self)
|
39,953 |
s3path.old_versions
|
as_uri
|
Return the path as a 's3' URI.
|
def as_uri(self) -> str:
"""
Return the path as a 's3' URI.
"""
return super().as_uri()
|
(self) -> str
|
39,963 |
s3path.old_versions
|
PureVersionedS3Path
|
PurePath subclass for AWS S3 service Keys with Versions.
S3 is not a file-system, but we can look at it like a POSIX system.
|
class PureVersionedS3Path(PureS3Path):
"""
PurePath subclass for AWS S3 service Keys with Versions.
S3 is not a file-system, but we can look at it like a POSIX system.
"""
def __new__(cls, *args, version_id: str):
self = super().__new__(cls, *args)
self.version_id = version_id
return self
@classmethod
def from_uri(cls, uri: str, *, version_id: str):
"""
from_uri class method creates a class instance from uri and version id
>> from s3path import VersionedS3Path
>> VersionedS3Path.from_uri('s3://<bucket>/<key>', version_id='<version_id>')
<< VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
"""
self = PureS3Path.from_uri(uri)
return cls(self, version_id=version_id)
@classmethod
def from_bucket_key(cls, bucket: str, key: str, *, version_id: str):
"""
from_bucket_key class method creates a class instance from bucket, key and version id
>> from s3path import VersionedS3Path
>> VersionedS3Path.from_bucket_key('<bucket>', '<key>', version_id='<version_id>')
<< VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
"""
self = PureS3Path.from_bucket_key(bucket=bucket, key=key)
return cls(self, version_id=version_id)
def __repr__(self) -> str:
return f'{type(self).__name__}({self.as_posix()}, version_id={self.version_id})'
def joinpath(self, *args):
if not args:
return self
new_path = super().joinpath(*args)
if isinstance(args[-1], PureVersionedS3Path):
new_path.version_id = args[-1].version_id
else:
new_path = S3Path(new_path)
return new_path
def __truediv__(self, key):
if not isinstance(key, (PureS3Path, str)):
return NotImplemented
key = S3Path(key) if isinstance(key, str) else key
return key.__rtruediv__(self)
def __rtruediv__(self, key):
if not isinstance(key, (PureS3Path, str)):
return NotImplemented
new_path = super().__rtruediv__(key)
new_path.version_id = self.version_id
return new_path
|
(*args, version_id: 'str')
|
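The versioned variant carries a `version_id` alongside the path and propagates it through joins (see `__rtruediv__`). A sketch mirroring the from_uri docstring above, assuming the concrete VersionedS3Path class is exported by the package:

```python
# Versioned paths keep the S3 object version id next to the bucket/key path.
from s3path import VersionedS3Path

vp = VersionedS3Path.from_uri('s3://my-bucket/data.csv', version_id='<version_id>')
print(vp)             # /my-bucket/data.csv
print(vp.version_id)  # <version_id>
print(repr(vp))       # VersionedS3Path(/my-bucket/data.csv, version_id=<version_id>)
```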
39,972 |
s3path.old_versions
|
__new__
| null |
def __new__(cls, *args, version_id: str):
self = super().__new__(cls, *args)
self.version_id = version_id
return self
|
(cls, *args, version_id: str)
|
39,974 |
s3path.old_versions
|
__repr__
| null |
def __repr__(self) -> str:
return f'{type(self).__name__}({self.as_posix()}, version_id={self.version_id})'
|
(self) -> str
|
39,975 |
s3path.old_versions
|
__rtruediv__
| null |
def __rtruediv__(self, key):
if not isinstance(key, (PureS3Path, str)):
return NotImplemented
new_path = super().__rtruediv__(key)
new_path.version_id = self.version_id
return new_path
|
(self, key)
|
39,977 |
s3path.old_versions
|
__truediv__
| null |
def __truediv__(self, key):
if not isinstance(key, (PureS3Path, str)):
return NotImplemented
key = S3Path(key) if isinstance(key, str) else key
return key.__rtruediv__(self)
|
(self, key)
|
39,985 |
s3path.old_versions
|
joinpath
| null |
def joinpath(self, *args):
if not args:
return self
new_path = super().joinpath(*args)
if isinstance(args[-1], PureVersionedS3Path):
new_path.version_id = args[-1].version_id
else:
new_path = S3Path(new_path)
return new_path
|
(self, *args)
|
39,991 |
s3path.old_versions
|
S3Path
|
Path subclass for AWS S3 service.
S3Path provides a convenient Python file-system/Path-like interface for the AWS S3 service,
using the boto3 S3 resource as a driver.
If boto3 isn't installed in your environment, NotImplementedError will be raised.
|
class S3Path(_PathNotSupportedMixin, Path, PureS3Path):
"""
Path subclass for AWS S3 service.
S3Path provides a convenient Python file-system/Path-like interface for the AWS S3 service,
using the boto3 S3 resource as a driver.
If boto3 isn't installed in your environment, NotImplementedError will be raised.
"""
_accessor = _s3_accessor
__slots__ = ()
def _init(self, template=None):
super()._init(template)
if template is None:
self._accessor = _s3_accessor
def stat(self, *, follow_symlinks: bool = True) -> StatResult:
"""
Returns information about this path (similarly to boto3's ObjectSummary).
For compatibility with pathlib, the returned object has some attributes similar to os.stat_result.
The result is looked up at each call to this method
"""
if not follow_symlinks:
raise NotImplementedError(
f'Setting follow_symlinks to {follow_symlinks} is unsupported on S3 service.')
self._absolute_path_validation()
if not self.key:
return None
return self._accessor.stat(self, follow_symlinks=follow_symlinks)
def exists(self) -> bool:
"""
Whether the path points to an existing Bucket, key or key prefix.
"""
self._absolute_path_validation()
if not self.bucket:
return True
return self._accessor.exists(self)
def is_dir(self) -> bool:
"""
Returns True if the path points to a Bucket or a key prefix, False if it points to a full key path.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
"""
self._absolute_path_validation()
if self.bucket and not self.key:
return True
return self._accessor.is_dir(self)
def is_file(self) -> bool:
"""
Returns True if the path points to a Bucket key, False if it points to Bucket or a key prefix.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
"""
self._absolute_path_validation()
if not self.bucket or not self.key:
return False
try:
return bool(self.stat())
except ClientError:
return False
def iterdir(self) -> Generator[S3Path, None, None]:
"""
When the path points to a Bucket or a key prefix, yield path objects of the directory contents
"""
self._absolute_path_validation()
for name in self._accessor.listdir(self):
yield self._make_child_relpath(name)
def glob(self, pattern: str) -> Generator[S3Path, None, None]:
"""
Glob the given relative pattern in the Bucket / key prefix represented by this path,
yielding all matching files (of any kind)
"""
self._absolute_path_validation()
general_options = self._accessor.configuration_map.get_general_options(self)
glob_new_algorithm = general_options['glob_new_algorithm']
if not glob_new_algorithm:
yield from super().glob(pattern)
return
yield from self._glob(pattern)
def _glob(self, pattern):
""" Glob with new Algorithm that better fit S3 API """
sys.audit("pathlib.Path.glob", self, pattern)
if not pattern:
raise ValueError(f'Unacceptable pattern: {pattern}')
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
for part in pattern_parts:
if part != '**' and '**' in part:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
selector = _Selector(self, pattern=pattern)
yield from selector.select()
def _scandir(self):
"""
Override _scandir so _Selector will rely on an S3 compliant implementation
"""
return self._accessor.scandir(self)
def rglob(self, pattern: str) -> Generator[S3Path, None, None]:
"""
This is like calling S3Path.glob with "**/" added in front of the given relative pattern
"""
self._absolute_path_validation()
general_options = self._accessor.configuration_map.get_general_options(self)
glob_new_algorithm = general_options['glob_new_algorithm']
if not glob_new_algorithm:
yield from super().rglob(pattern)
return
yield from self._rglob(pattern)
def _rglob(self, pattern):
""" RGlob with new Algorithm that better fit S3 API """
sys.audit("pathlib.Path.rglob", self, pattern)
if not pattern:
raise ValueError(f'Unacceptable pattern: {pattern}')
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
for part in pattern_parts:
if part != '**' and '**' in part:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
pattern = f'**{self._flavour.sep}{pattern}'
selector = _Selector(self, pattern=pattern)
yield from selector.select()
def open(
self,
mode: Literal["r", "w", "rb", "wb"] = 'r',
buffering: int = DEFAULT_BUFFER_SIZE,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None
) -> Union[TextIOWrapper, smart_open.s3.Reader, smart_open.s3.MultipartWriter]:
"""
Opens the Bucket key pointed to by the path, returns a Key file object that you can read/write with
"""
self._absolute_path_validation()
if smart_open.__version__ < '4.0.0' and mode.startswith('b'):
mode = ''.join(reversed(mode))
return self._accessor.open(
self,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline)
def owner(self) -> str:
"""
Returns the name of the user owning the Bucket or key.
Similarly to boto3's ObjectSummary owner attribute
"""
self._absolute_path_validation()
if not self.is_file():
raise KeyError('file not found')
return self._accessor.owner(self)
def rename(self, target: Union[str, S3Path]) -> S3Path:
"""
Renames this file or Bucket / key prefix / key to the given target.
If target exists and is a file, it will be replaced silently if the user has permission.
If path is a key prefix, it will replace all the keys with the same prefix to the new target prefix.
Target can be either a string or another S3Path object.
"""
self._absolute_path_validation()
if not isinstance(target, type(self)):
target = type(self)(target)
target._absolute_path_validation()
self._accessor.rename(self, target)
return self.__class__(target)
def replace(self, target: Union[str, S3Path]) -> S3Path:
"""
Renames this Bucket / key prefix / key to the given target.
If target points to an existing Bucket / key prefix / key, it will be unconditionally replaced.
"""
return self.rename(target)
def unlink(self, missing_ok: bool = False):
"""
Remove this key from its bucket.
"""
self._absolute_path_validation()
# S3 doesn't care if you remove full prefixes or buckets with its delete API
# so unless we manually check, this call will be dropped through without any
# validation and could result in data loss
try:
if self.is_dir():
raise IsADirectoryError(str(self))
if not self.is_file():
raise FileNotFoundError(str(self))
except (IsADirectoryError, FileNotFoundError):
if missing_ok:
return
raise
try:
# XXX: Note: If we don't check if the file exists here, S3 will always return
# success even if we try to delete a key that doesn't exist. So, if we want
# to raise a `FileNotFoundError`, we need to manually check if the file exists
# before we make the API call -- since we want to delete the file anyway,
# we can just ignore this for now and be satisfied that the file will be removed
self._accessor.unlink(self)
except FileNotFoundError:
if not missing_ok:
raise
def rmdir(self):
"""
Removes this Bucket / key prefix. The Bucket / key prefix must be empty
"""
self._absolute_path_validation()
if self.is_file():
raise NotADirectoryError()
if not self.is_dir():
raise FileNotFoundError()
self._accessor.rmdir(self)
def samefile(self, other_path: Union[str, S3Path]) -> bool:
"""
Returns whether this path points to the same Bucket key as other_path,
which can be either a Path object or a string
"""
self._absolute_path_validation()
if not isinstance(other_path, Path):
other_path = type(self)(other_path)
return self.bucket == other_path.bucket and self.key == other_path.key and self.is_file()
def touch(self, mode: int = 0o666, exist_ok: bool = True):
"""
Creates a key at this given path.
If the key already exists,
the function succeeds if exist_ok is true (and its modification time is updated to the current time),
otherwise FileExistsError is raised
"""
if self.exists() and not exist_ok:
raise FileExistsError()
self.write_text('')
def mkdir(self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False):
"""
Create a path bucket.
AWS S3 Service doesn't support folders, therefore the mkdir method will only create the current bucket.
If the bucket path already exists, FileExistsError is raised.
If exist_ok is false (the default), FileExistsError is raised if the target Bucket already exists.
If exist_ok is true, OSError exceptions will be ignored.
If parents is false (the default), mkdir will create the bucket only if this is a Bucket path.
If parents is true, mkdir will create the bucket even if the path has a Key path.
mode argument is ignored.
"""
try:
if not self.bucket:
raise FileNotFoundError(f'No bucket in {type(self)} {self}')
if self.key and not parents:
raise FileNotFoundError(f'Only bucket path can be created, got {self}')
if type(self)(self._flavour.sep, self.bucket).exists():
raise FileExistsError(f'Bucket {self.bucket} already exists')
self._accessor.mkdir(self, mode)
except OSError:
if not exist_ok:
raise
def is_mount(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a mounting feature, therefore this method will always return False
"""
return False
def is_symlink(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a symlink feature, therefore this method will always return False
"""
return False
def is_socket(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a sockets feature, therefore this method will always return False
"""
return False
def is_fifo(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a fifo feature, therefore this method will always return False
"""
return False
def absolute(self) -> S3Path:
"""
Handle absolute method only if the path is already an absolute one
since we have no way to compute an absolute path from a relative one in S3.
"""
if self.is_absolute():
return self
# We can't compute the absolute path from a relative one
raise ValueError("Absolute path can't be determined for relative S3Path objects")
def get_presigned_url(self, expire_in: Union[timedelta, int] = 3600) -> str:
"""
Returns a pre-signed url. Anyone with the url can make a GET request to get the file.
You can set an expiration date with the expire_in argument (integer or timedelta object).
Note that generating a presigned url may require more information or setup than to use other
S3Path functions. It's because it needs to know the exact aws region and use s3v4 as signature
version. Meaning you may have to do this:
```python
import boto3
from botocore.config import Config
from s3path import S3Path, register_configuration_parameter
resource = boto3.resource(
"s3",
config=Config(signature_version="s3v4"),
region_name="the aws region name"
)
register_configuration_parameter(S3Path("/"), resource=resource)
```
A simple example:
```python
from s3path import S3Path
import requests
file = S3Path("/my-bucket/toto.txt")
file.write_text("hello world")
presigned_url = file.get_presigned_url()
print(requests.get(presigned_url).content)
b"hello world"
"""
self._absolute_path_validation()
if isinstance(expire_in, timedelta):
expire_in = int(expire_in.total_seconds())
if expire_in <= 0:
raise ValueError(
f"The expire_in argument can't represent a negative or null time delta. "
f"You provided expire_in = {expire_in} seconds which is below or equal to 0 seconds.")
return self._accessor.get_presigned_url(self, expire_in)
|
(*args, **kwargs)
|
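S3Path layers the boto3-backed operations (stat, exists, iterdir, open, unlink, ...) on top of the pure path class. An end-to-end sketch, assuming boto3 credentials are configured and 'my-bucket' is a placeholder bucket you can write to:

```python
# End-to-end sketch against a real bucket; requires boto3 credentials and write access.
from s3path import S3Path

path = S3Path('/my-bucket/notes/hello.txt')   # 'my-bucket' is a placeholder name
path.write_text('hello world')                # pathlib API, backed by S3
print(path.read_text())                       # hello world
print(path.exists(), path.is_file())          # True True

for child in S3Path('/my-bucket/notes/').iterdir():
    print(child)

path.unlink()                                 # delete the key
```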
40,009 |
s3path.old_versions
|
_glob
|
Glob with new algorithm that better fits the S3 API
|
def _glob(self, pattern):
""" Glob with new Algorithm that better fit S3 API """
sys.audit("pathlib.Path.glob", self, pattern)
if not pattern:
raise ValueError(f'Unacceptable pattern: {pattern}')
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
for part in pattern_parts:
if part != '**' and '**' in part:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
selector = _Selector(self, pattern=pattern)
yield from selector.select()
|
(self, pattern)
|
40,010 |
s3path.old_versions
|
_init
| null |
def _init(self, template=None):
super()._init(template)
if template is None:
self._accessor = _s3_accessor
|
(self, template=None)
|
40,013 |
s3path.old_versions
|
_rglob
|
RGlob with new algorithm that better fits the S3 API
|
def _rglob(self, pattern):
""" RGlob with new Algorithm that better fit S3 API """
sys.audit("pathlib.Path.rglob", self, pattern)
if not pattern:
raise ValueError(f'Unacceptable pattern: {pattern}')
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
for part in pattern_parts:
if part != '**' and '**' in part:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
pattern = f'**{self._flavour.sep}{pattern}'
selector = _Selector(self, pattern=pattern)
yield from selector.select()
|
(self, pattern)
|
40,014 |
s3path.old_versions
|
_scandir
|
Override _scandir so _Selector will rely on an S3 compliant implementation
|
def _scandir(self):
"""
Override _scandir so _Selector will rely on an S3 compliant implementation
"""
return self._accessor.scandir(self)
|
(self)
|
40,015 |
s3path.old_versions
|
absolute
|
Handle absolute method only if the path is already an absolute one
since we have no way to compute an absolute path from a relative one in S3.
|
def absolute(self) -> S3Path:
"""
Handle absolute method only if the path is already an absolute one
since we have no way to compute an absolute path from a relative one in S3.
"""
if self.is_absolute():
return self
# We can't compute the absolute path from a relative one
raise ValueError("Absolute path can't be determined for relative S3Path objects")
|
(self) -> s3path.old_versions.S3Path
|
40,018 |
s3path.old_versions
|
chmod
|
chmod method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def chmod(self, mode, *, follow_symlinks=True):
"""
chmod method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.chmod.__qualname__)
raise NotImplementedError(message)
|
(self, mode, *, follow_symlinks=True)
|
40,019 |
s3path.old_versions
|
exists
|
Whether the path points to an existing Bucket, key or key prefix.
|
def exists(self) -> bool:
"""
Whether the path points to an existing Bucket, key or key prefix.
"""
self._absolute_path_validation()
if not self.bucket:
return True
return self._accessor.exists(self)
|
(self) -> bool
|
40,020 |
s3path.old_versions
|
expanduser
|
expanduser method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def expanduser(self):
"""
expanduser method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.expanduser.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,021 |
s3path.old_versions
|
get_presigned_url
|
Returns a pre-signed url. Anyone with the url can make a GET request to get the file.
You can set an expiration date with the expire_in argument (integer or timedelta object).
Note that generating a presigned url may require more information or setup than to use other
S3Path functions. It's because it needs to know the exact aws region and use s3v4 as signature
version. Meaning you may have to do this:
```python
import boto3
from botocore.config import Config
from s3path import S3Path, register_configuration_parameter
resource = boto3.resource(
"s3",
config=Config(signature_version="s3v4"),
region_name="the aws region name"
)
register_configuration_parameter(S3Path("/"), resource=resource)
```
A simple example:
```python
from s3path import S3Path
import requests
file = S3Path("/my-bucket/toto.txt")
file.write_text("hello world")
presigned_url = file.get_presigned_url()
print(requests.get(presigned_url).content)
b"hello world"
|
def get_presigned_url(self, expire_in: Union[timedelta, int] = 3600) -> str:
"""
Returns a pre-signed url. Anyone with the url can make a GET request to get the file.
You can set an expiration date with the expire_in argument (integer or timedelta object).
Note that generating a presigned url may require more information or setup than to use other
S3Path functions. It's because it needs to know the exact aws region and use s3v4 as signature
version. Meaning you may have to do this:
```python
import boto3
from botocore.config import Config
from s3path import S3Path, register_configuration_parameter
resource = boto3.resource(
"s3",
config=Config(signature_version="s3v4"),
region_name="the aws region name"
)
register_configuration_parameter(S3Path("/"), resource=resource)
```
A simple example:
```python
from s3path import S3Path
import requests
file = S3Path("/my-bucket/toto.txt")
file.write_text("hello world")
presigned_url = file.get_presigned_url()
print(requests.get(presigned_url).content)
b"hello world"
"""
self._absolute_path_validation()
if isinstance(expire_in, timedelta):
expire_in = int(expire_in.total_seconds())
if expire_in <= 0:
raise ValueError(
f"The expire_in argument can't represent a negative or null time delta. "
f"You provided expire_in = {expire_in} seconds which is below or equal to 0 seconds.")
return self._accessor.get_presigned_url(self, expire_in)
|
(self, expire_in: Union[datetime.timedelta, int] = 3600) -> str
|
40,022 |
s3path.old_versions
|
glob
|
Glob the given relative pattern in the Bucket / key prefix represented by this path,
yielding all matching files (of any kind)
|
def glob(self, pattern: str) -> Generator[S3Path, None, None]:
"""
Glob the given relative pattern in the Bucket / key prefix represented by this path,
yielding all matching files (of any kind)
"""
self._absolute_path_validation()
general_options = self._accessor.configuration_map.get_general_options(self)
glob_new_algorithm = general_options['glob_new_algorithm']
if not glob_new_algorithm:
yield from super().glob(pattern)
return
yield from self._glob(pattern)
|
(self, pattern: str) -> Generator[s3path.old_versions.S3Path, NoneType, NoneType]
|
40,023 |
s3path.old_versions
|
group
|
group method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def group(self):
"""
group method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.group.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,024 |
s3path.old_versions
|
hardlink_to
|
hardlink_to method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def hardlink_to(self, *args, **kwargs):
"""
hardlink_to method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.hardlink_to.__qualname__)
raise NotImplementedError(message)
|
(self, *args, **kwargs)
|
40,026 |
s3path.old_versions
|
is_block_device
|
is_block_device method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def is_block_device(self):
"""
is_block_device method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.is_block_device.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,027 |
s3path.old_versions
|
is_char_device
|
is_char_device method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def is_char_device(self):
"""
is_char_device method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.is_char_device.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,028 |
s3path.old_versions
|
is_dir
|
Returns True if the path points to a Bucket or a key prefix, False if it points to a full key path.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
|
def is_dir(self) -> bool:
"""
Returns True if the path points to a Bucket or a key prefix, False if it points to a full key path.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
"""
self._absolute_path_validation()
if self.bucket and not self.key:
return True
return self._accessor.is_dir(self)
|
(self) -> bool
|
40,029 |
s3path.old_versions
|
is_fifo
|
AWS S3 Service doesn't have a fifo feature, therefore this method will always return False
|
def is_fifo(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a fifo feature, therefore this method will always return False
"""
return False
|
(self) -> Literal[False]
|
40,030 |
s3path.old_versions
|
is_file
|
Returns True if the path points to a Bucket key, False if it points to Bucket or a key prefix.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
|
def is_file(self) -> bool:
"""
Returns True if the path points to a Bucket key, False if it points to Bucket or a key prefix.
False is also returned if the path doesn’t exist.
Other errors (such as permission errors) are propagated.
"""
self._absolute_path_validation()
if not self.bucket or not self.key:
return False
try:
return bool(self.stat())
except ClientError:
return False
|
(self) -> bool
|
40,031 |
s3path.old_versions
|
is_mount
|
AWS S3 Service doesn't have a mounting feature, therefore this method will always return False
|
def is_mount(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a mounting feature, therefore this method will always return False
"""
return False
|
(self) -> Literal[False]
|
40,034 |
s3path.old_versions
|
is_socket
|
AWS S3 Service doesn't have a socket feature, therefore this method will always return False
|
def is_socket(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a socket feature, therefore this method will always return False
"""
return False
|
(self) -> Literal[False]
|
40,035 |
s3path.old_versions
|
is_symlink
|
AWS S3 Service doesn't have a symlink feature, therefore this method will always return False
|
def is_symlink(self) -> Literal[False]:
"""
AWS S3 Service doesn't have a symlink feature, therefore this method will always return False
"""
return False
|
(self) -> Literal[False]
|
40,036 |
s3path.old_versions
|
iterdir
|
When the path points to a Bucket or a key prefix, yield path objects of the directory contents
|
def iterdir(self) -> Generator[S3Path, None, None]:
"""
When the path points to a Bucket or a key prefix, yield path objects of the directory contents
"""
self._absolute_path_validation()
for name in self._accessor.listdir(self):
yield self._make_child_relpath(name)
|
(self) -> Generator[s3path.old_versions.S3Path, NoneType, NoneType]
|
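A short sketch, assuming a bucket named my-bucket exists; each yielded child is itself an S3Path:
```python
from s3path import S3Path

# hypothetical bucket; yields the keys and key prefixes directly under it
for child in S3Path("/my-bucket/").iterdir():
    print(child)
```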
40,038 |
s3path.old_versions
|
lchmod
|
lchmod method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def lchmod(self, mode):
"""
lchmod method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.lchmod.__qualname__)
raise NotImplementedError(message)
|
(self, mode)
|
40,040 |
s3path.old_versions
|
lstat
|
lstat method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def lstat(self):
"""
lstat method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.lstat.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,042 |
s3path.old_versions
|
mkdir
|
Create a path bucket.
AWS S3 Service doesn't support folders, therefore the mkdir method will only create the current bucket.
If the bucket path already exists, FileExistsError is raised.
If exist_ok is false (the default), FileExistsError is raised if the target Bucket already exists.
If exist_ok is true, OSError exceptions will be ignored.
If parents is false (the default), mkdir will create the bucket only if this is a Bucket path.
If parents is true, mkdir will create the bucket even if the path also has a Key part.
The mode argument is ignored.
|
def mkdir(self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False):
"""
Create a path bucket.
AWS S3 Service doesn't support folders, therefore the mkdir method will only create the current bucket.
If the bucket path already exists, FileExistsError is raised.
If exist_ok is false (the default), FileExistsError is raised if the target Bucket already exists.
If exist_ok is true, OSError exceptions will be ignored.
If parents is false (the default), mkdir will create the bucket only if this is a Bucket path.
If parents is true, mkdir will create the bucket even if the path also has a Key part.
The mode argument is ignored.
"""
try:
if not self.bucket:
raise FileNotFoundError(f'No bucket in {type(self)} {self}')
if self.key and not parents:
raise FileNotFoundError(f'Only bucket path can be created, got {self}')
if type(self)(self._flavour.sep, self.bucket).exists():
raise FileExistsError(f'Bucket {self.bucket} already exists')
self._accessor.mkdir(self, mode)
except OSError:
if not exist_ok:
raise
|
(self, mode: int = 511, parents: bool = False, exist_ok: bool = False)
|
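A hedged sketch of the two branches described above; the bucket name is made up:
```python
from s3path import S3Path

# hypothetical bucket name; only the bucket itself is created on S3
S3Path("/my-new-bucket/").mkdir(exist_ok=True)

# with parents=True a path that also carries a key part is accepted,
# but still only the bucket gets created
S3Path("/my-new-bucket/some/prefix/").mkdir(parents=True, exist_ok=True)
```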
40,043 |
s3path.old_versions
|
open
|
Opens the Bucket key pointed to by the path, returns a Key file object that you can read/write with
|
def open(
self,
mode: Literal["r", "w", "rb", "wb"] = 'r',
buffering: int = DEFAULT_BUFFER_SIZE,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None
) -> Union[TextIOWrapper, smart_open.s3.Reader, smart_open.s3.MultipartWriter]:
"""
Opens the Bucket key pointed to by the path, returns a Key file object that you can read/write with
"""
self._absolute_path_validation()
if smart_open.__version__ < '4.0.0' and mode.startswith('b'):
mode = ''.join(reversed(mode))
return self._accessor.open(
self,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline)
|
(self, mode: Literal['r', 'w', 'rb', 'wb'] = 'r', buffering: int = 8192, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None) -> Union[_io.TextIOWrapper, smart_open.s3.Reader, smart_open.s3.MultipartWriter]
|
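A minimal round-trip sketch, assuming a writable bucket named my-bucket; the returned object behaves like a regular file handle backed by smart_open:
```python
from s3path import S3Path

# hypothetical key; write it, then read it back through file-like objects
notes = S3Path("/my-bucket/notes.txt")
with notes.open("w") as f:
    f.write("hello")
with notes.open("r") as f:
    print(f.read())  # hello
```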
40,044 |
s3path.old_versions
|
owner
|
Returns the name of the user owning the Bucket or key.
Similar to boto3's ObjectSummary owner attribute
|
def owner(self) -> str:
"""
Returns the name of the user owning the Bucket or key.
Similar to boto3's ObjectSummary owner attribute
"""
self._absolute_path_validation()
if not self.is_file():
raise KeyError('file not found')
return self._accessor.owner(self)
|
(self) -> str
|
40,047 |
s3path.old_versions
|
readlink
|
readlink method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def readlink(self):
"""
readlink method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.readlink.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,049 |
s3path.old_versions
|
rename
|
Renames this file or Bucket / key prefix / key to the given target.
If target exists and is a file, it will be replaced silently if the user has permission.
If the path is a key prefix, all keys with that prefix will be renamed to the new target prefix.
Target can be either a string or another S3Path object.
|
def rename(self, target: Union[str, S3Path]) -> S3Path:
"""
Renames this file or Bucket / key prefix / key to the given target.
If target exists and is a file, it will be replaced silently if the user has permission.
If the path is a key prefix, all keys with that prefix will be renamed to the new target prefix.
Target can be either a string or another S3Path object.
"""
self._absolute_path_validation()
if not isinstance(target, type(self)):
target = type(self)(target)
target._absolute_path_validation()
self._accessor.rename(self, target)
return self.__class__(target)
|
(self, target: Union[str, s3path.old_versions.S3Path]) -> s3path.old_versions.S3Path
|
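A small sketch with made-up keys; the target may be given as a plain string or as another S3Path:
```python
from s3path import S3Path

# hypothetical keys; the source key is moved and the new path is returned
src = S3Path("/my-bucket/old-name.txt")
dst = src.rename("/my-bucket/new-name.txt")
print(dst)  # /my-bucket/new-name.txt
```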
40,050 |
s3path.old_versions
|
replace
|
Renames this Bucket / key prefix / key to the given target.
If target points to an existing Bucket / key prefix / key, it will be unconditionally replaced.
|
def replace(self, target: Union[str, S3Path]) -> S3Path:
"""
Renames this Bucket / key prefix / key to the given target.
If target points to an existing Bucket / key prefix / key, it will be unconditionally replaced.
"""
return self.rename(target)
|
(self, target: Union[str, s3path.old_versions.S3Path]) -> s3path.old_versions.S3Path
|
40,051 |
s3path.old_versions
|
resolve
|
resolve method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def resolve(self):
"""
resolve method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.resolve.__qualname__)
raise NotImplementedError(message)
|
(self)
|
40,052 |
s3path.old_versions
|
rglob
|
This is like calling S3Path.glob with "**/" added in front of the given relative pattern
|
def rglob(self, pattern: str) -> Generator[S3Path, None, None]:
"""
This is like calling S3Path.glob with "**/" added in front of the given relative pattern
"""
self._absolute_path_validation()
general_options = self._accessor.configuration_map.get_general_options(self)
glob_new_algorithm = general_options['glob_new_algorithm']
if not glob_new_algorithm:
yield from super().rglob(pattern)
return
yield from self._rglob(pattern)
|
(self, pattern: str) -> Generator[s3path.old_versions.S3Path, NoneType, NoneType]
|
40,053 |
s3path.old_versions
|
rmdir
|
Removes this Bucket / key prefix. The Bucket / key prefix must be empty
|
def rmdir(self):
"""
Removes this Bucket / key prefix. The Bucket / key prefix must be empty
"""
self._absolute_path_validation()
if self.is_file():
raise NotADirectoryError()
if not self.is_dir():
raise FileNotFoundError()
self._accessor.rmdir(self)
|
(self)
|
40,054 |
s3path.old_versions
|
samefile
|
Returns whether this path points to the same Bucket key as other_path,
which can be either a Path object or a string
|
def samefile(self, other_path: Union[str, S3Path]) -> bool:
"""
Returns whether this path points to the same Bucket key as other_path,
which can be either a Path object or a string
"""
self._absolute_path_validation()
if not isinstance(other_path, Path):
other_path = type(self)(other_path)
return self.bucket == other_path.bucket and self.key == other_path.key and self.is_file()
|
(self, other_path: Union[str, s3path.old_versions.S3Path]) -> bool
|
40,055 |
s3path.old_versions
|
stat
|
Returns information about this path (similarly to boto3's ObjectSummary).
For compatibility with pathlib, the returned object has some attributes similar to os.stat_result.
The result is looked up at each call to this method
|
def stat(self, *, follow_symlinks: bool = True) -> StatResult:
"""
Returns information about this path (similarly to boto3's ObjectSummary).
For compatibility with pathlib, the returned object has some attributes similar to os.stat_result.
The result is looked up at each call to this method
"""
if not follow_symlinks:
raise NotImplementedError(
f'Setting follow_symlinks to {follow_symlinks} is unsupported on S3 service.')
self._absolute_path_validation()
if not self.key:
return None
return self._accessor.stat(self, follow_symlinks=follow_symlinks)
|
(self, *, follow_symlinks: bool = True) -> s3path.old_versions.StatResult
|
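A quick sketch of reading the StatResult fields, assuming the key exists; version_id is only meaningful on versioned buckets:
```python
from s3path import S3Path

# hypothetical key; stat() returns a StatResult, not an os.stat_result
info = S3Path("/my-bucket/toto.txt").stat()
print(info.st_size)        # object size in bytes
print(info.st_mtime)       # last-modified time as a POSIX timestamp
print(info.st_version_id)  # None unless the bucket is versioned
```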
40,056 |
s3path.old_versions
|
symlink_to
|
symlink_to method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
|
def symlink_to(self, *args, **kwargs):
"""
symlink_to method is unsupported on S3 service
AWS S3 doesn't have this file system action concept
"""
message = self._NOT_SUPPORTED_MESSAGE.format(method=self.symlink_to.__qualname__)
raise NotImplementedError(message)
|
(self, *args, **kwargs)
|
40,057 |
s3path.old_versions
|
touch
|
Creates a key at this given path.
If the key already exists,
the function succeeds if exist_ok is true (and its modification time is updated to the current time),
otherwise FileExistsError is raised
|
def touch(self, mode: int = 0o666, exist_ok: bool = True):
"""
Creates a key at this given path.
If the key already exists,
the function succeeds if exist_ok is true (and its modification time is updated to the current time),
otherwise FileExistsError is raised
"""
if self.exists() and not exist_ok:
raise FileExistsError()
self.write_text('')
|
(self, mode: int = 438, exist_ok: bool = True)
|
40,058 |
s3path.old_versions
|
unlink
|
Remove this key from its bucket.
|
def unlink(self, missing_ok: bool = False):
"""
Remove this key from its bucket.
"""
self._absolute_path_validation()
# S3 doesn't care if you remove full prefixes or buckets with its delete API
# so unless we manually check, this call will be dropped through without any
# validation and could result in data loss
try:
if self.is_dir():
raise IsADirectoryError(str(self))
if not self.is_file():
raise FileNotFoundError(str(self))
except (IsADirectoryError, FileNotFoundError):
if missing_ok:
return
raise
try:
# XXX: Note: If we don't check if the file exists here, S3 will always return
# success even if we try to delete a key that doesn't exist. So, if we want
# to raise a `FileNotFoundError`, we need to manually check if the file exists
# before we make the API call -- since we want to delete the file anyway,
# we can just ignore this for now and be satisfied that the file will be removed
self._accessor.unlink(self)
except FileNotFoundError:
if not missing_ok:
raise
|
(self, missing_ok: bool = False)
|
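A brief sketch, assuming a writable bucket; missing_ok mirrors pathlib.Path.unlink:
```python
from s3path import S3Path

# hypothetical key; create it, delete it, then show that missing_ok
# suppresses the FileNotFoundError on the second delete
tmp = S3Path("/my-bucket/tmp.txt")
tmp.touch()
tmp.unlink()
tmp.unlink(missing_ok=True)  # no error even though the key is already gone
```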
40,064 |
s3path.old_versions
|
StatResult
|
Base of os.stat_result but with boto3 s3 features
|
class StatResult(namedtuple('BaseStatResult', 'size, last_modified, version_id', defaults=(None,))):
"""
Base of os.stat_result but with boto3 s3 features
"""
def __getattr__(self, item):
if item in vars(stat_result):
raise UnsupportedOperation(f'{type(self).__name__} does not support the {item} attribute')
return super().__getattribute__(item)
@property
def st_size(self) -> int:
return self.size
@property
def st_mtime(self) -> float:
return self.last_modified.timestamp()
@property
def st_version_id(self) -> str:
return self.version_id
|
(size, last_modified, version_id=None)
|
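A sketch of the named tuple behaviour using made-up values; the import path is an assumption, and in practice the object is normally obtained from S3Path.stat():
```python
from datetime import datetime, timezone
from s3path import StatResult  # assumed import path; usually returned by S3Path.stat()

st = StatResult(size=11, last_modified=datetime(2024, 1, 1, tzinfo=timezone.utc))
print(st.st_size)        # 11
print(st.st_mtime)       # POSIX timestamp of last_modified
print(st.st_version_id)  # None (default when the bucket is not versioned)
# os.stat_result-only attributes such as st_mode raise UnsupportedOperation
```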
40,065 |
s3path.old_versions
|
__getattr__
| null |
def __getattr__(self, item):
if item in vars(stat_result):
raise UnsupportedOperation(f'{type(self).__name__} does not support the {item} attribute')
return super().__getattribute__(item)
|
(self, item)
|
40,067 |
namedtuple_BaseStatResult
|
__new__
|
Create new instance of BaseStatResult(size, last_modified, version_id)
|
from builtins import function
|
(_cls, size, last_modified, version_id=None)
|
40,070 |
collections
|
_replace
|
Return a new BaseStatResult object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
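The code above is the generic collections.namedtuple factory that generates _replace; a tiny standard-library example of the helper in action:
```python
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
p = Point(11, 22)
print(p._replace(x=100))  # Point(x=100, y=22) -- a new tuple, p is unchanged
print(p._asdict())        # {'x': 11, 'y': 22}
```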
40,071 |
s3path.old_versions
|
VersionedS3Path
|
S3Path subclass for AWS S3 service Keys with Versions.
>> from s3path import VersionedS3Path
>> VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
<< VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
|
class VersionedS3Path(PureVersionedS3Path, S3Path):
"""
S3Path subclass for AWS S3 service Keys with Versions.
>> from s3path import VersionedS3Path
>> VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
<< VersionedS3Path('/<bucket>/<key>', version_id='<version_id>')
"""
_accessor = _versioned_s3_accessor
def _init(self, template=None):
super()._init(template)
if template is None:
self._accessor = _versioned_s3_accessor
|
(*args, version_id: 'str')
|
40,090 |
s3path.old_versions
|
_init
| null |
def _init(self, template=None):
super()._init(template)
if template is None:
self._accessor = _versioned_s3_accessor
|
(self, template=None)
|
40,146 |
s3path.old_versions
|
register_configuration_parameter
| null |
def register_configuration_parameter(
path: PureS3Path,
*,
parameters: Optional[dict] = None,
resource: Optional[ServiceResource] = None,
glob_new_algorithm: Optional[bool] = None):
if not isinstance(path, PureS3Path):
raise TypeError(f'path argument has to be a {PurePath} type. got {type(path)}')
if parameters and not isinstance(parameters, dict):
raise TypeError(f'parameters argument has to be a dict type. got {type(parameters)}')
if parameters is None and resource is None and glob_new_algorithm is None:
raise ValueError('you have to specify the parameters, resource or glob_new_algorithm arguments')
_s3_accessor.configuration_map.set_configuration(
path,
resource=resource,
arguments=parameters,
glob_new_algorithm=glob_new_algorithm)
|
(path: s3path.old_versions.PureS3Path, *, parameters: Optional[dict] = None, resource: Optional[boto3.resources.base.ServiceResource] = None, glob_new_algorithm: Optional[bool] = None)
|
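A hedged sketch of pointing a path hierarchy at a custom S3-compatible endpoint; the endpoint URL and credentials are placeholders:
```python
import boto3
from botocore.client import Config
from s3path import PureS3Path, register_configuration_parameter

# placeholder endpoint and credentials for an S3-compatible service (e.g. MinIO)
resource = boto3.resource(
    "s3",
    endpoint_url="http://localhost:9000",
    aws_access_key_id="<access-key>",
    aws_secret_access_key="<secret-key>",
    config=Config(signature_version="s3v4"),
)
# attach the resource to every path under the root (use a bucket path to scope it)
register_configuration_parameter(PureS3Path("/"), resource=resource)
```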
40,148 |
filigran_sseclient.sseclient
|
SSEClient
| null |
class SSEClient(object):
def __init__(
self, url, last_id=None, retry=3000, session=None, chunk_size=1024, **kwargs
):
self.url = url
self.last_id = last_id
self.retry = retry
self.chunk_size = chunk_size
# Optional support for passing in a requests.Session()
self.session = session
# Any extra kwargs will be fed into the requests.get call later.
self.requests_kwargs = kwargs
# The SSE spec requires making requests with Cache-Control: no-cache
if "headers" not in self.requests_kwargs:
self.requests_kwargs["headers"] = {}
self.requests_kwargs["headers"]["Cache-Control"] = "no-cache"
# The 'Accept' header is not required, but explicit > implicit
self.requests_kwargs["headers"]["Accept"] = "text/event-stream"
# Keep data here as it streams in
self.buf = ""
self._connect()
def _connect(self):
if self.last_id:
self.requests_kwargs["headers"]["Last-Event-ID"] = self.last_id
# Use session if set. Otherwise fall back to requests module.
requester = self.session or requests
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
transfer_encoding = self.resp.headers.get("transfer-encoding", "")
if transfer_encoding.lower() == "chunked":
self.resp_iterator = self.resp.iter_content(chunk_size=None)
else:
self.resp_iterator = self.iter_content()
encoding = self.resp.encoding or self.resp.apparent_encoding
self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
# TODO: Ensure we're handling redirects. Might also stick the 'origin'
# attribute on Events like the Javascript spec requires.
self.resp.raise_for_status()
def iter_content(self):
def generate():
while True:
if (
hasattr(self.resp.raw, "_fp")
and hasattr(self.resp.raw._fp, "fp")
and hasattr(self.resp.raw._fp.fp, "read1")
):
chunk = self.resp.raw._fp.fp.read1(self.chunk_size)
else:
# _fp is not available, this means that we cannot use short
# reads and this will block until the full chunk size is
# actually read
chunk = self.resp.raw.read(self.chunk_size)
if not chunk:
break
yield chunk
return generate()
def _event_complete(self):
return re.search(end_of_field, self.buf) is not None
def __iter__(self):
return self
def __next__(self):
while not self._event_complete():
try:
next_chunk = next(self.resp_iterator)
if not next_chunk:
raise EOFError()
self.buf += self.decoder.decode(next_chunk)
except (
StopIteration,
requests.RequestException,
EOFError,
six.moves.http_client.IncompleteRead,
) as e:
print(e)
time.sleep(self.retry / 1000.0)
self._connect()
# The SSE spec only supports resuming from a whole message, so
# if we have half a message we should throw it out.
head, sep, tail = self.buf.rpartition("\n")
self.buf = head + sep
continue
# Split the complete event (up to the end_of_field) into event_string,
# and retain anything after the current complete event in self.buf
# for next time.
(event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1)
msg = Event.parse(event_string)
# If the server requests a specific retry delay, we need to honor it.
if msg.retry:
self.retry = msg.retry
# last_id should only be set if included in the message. It's not
# forgotten if a message omits it.
if msg.id:
self.last_id = msg.id
return msg
if six.PY2:
next = __next__
|
(url, last_id=None, retry=3000, session=None, chunk_size=1024, **kwargs)
|
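A minimal consumption sketch; the stream URL and auth header are placeholders, and any extra keyword arguments are forwarded to requests.get as shown in __init__:
```python
from filigran_sseclient.sseclient import SSEClient

# placeholder URL and token; the client reconnects automatically,
# resuming from the last seen event id
stream = SSEClient(
    "https://example.com/stream",
    headers={"Authorization": "Bearer <token>"},
)
for event in stream:  # iterates forever as events arrive
    print(event.id, event.data)
```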
40,149 |
filigran_sseclient.sseclient
|
__init__
| null |
def __init__(
self, url, last_id=None, retry=3000, session=None, chunk_size=1024, **kwargs
):
self.url = url
self.last_id = last_id
self.retry = retry
self.chunk_size = chunk_size
# Optional support for passing in a requests.Session()
self.session = session
# Any extra kwargs will be fed into the requests.get call later.
self.requests_kwargs = kwargs
# The SSE spec requires making requests with Cache-Control: no-cache
if "headers" not in self.requests_kwargs:
self.requests_kwargs["headers"] = {}
self.requests_kwargs["headers"]["Cache-Control"] = "no-cache"
# The 'Accept' header is not required, but explicit > implicit
self.requests_kwargs["headers"]["Accept"] = "text/event-stream"
# Keep data here as it streams in
self.buf = ""
self._connect()
|
(self, url, last_id=None, retry=3000, session=None, chunk_size=1024, **kwargs)
|
40,151 |
filigran_sseclient.sseclient
|
__next__
| null |
def __next__(self):
while not self._event_complete():
try:
next_chunk = next(self.resp_iterator)
if not next_chunk:
raise EOFError()
self.buf += self.decoder.decode(next_chunk)
except (
StopIteration,
requests.RequestException,
EOFError,
six.moves.http_client.IncompleteRead,
) as e:
print(e)
time.sleep(self.retry / 1000.0)
self._connect()
# The SSE spec only supports resuming from a whole message, so
# if we have half a message we should throw it out.
head, sep, tail = self.buf.rpartition("\n")
self.buf = head + sep
continue
# Split the complete event (up to the end_of_field) into event_string,
# and retain anything after the current complete event in self.buf
# for next time.
(event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1)
msg = Event.parse(event_string)
# If the server requests a specific retry delay, we need to honor it.
if msg.retry:
self.retry = msg.retry
# last_id should only be set if included in the message. It's not
# forgotten if a message omits it.
if msg.id:
self.last_id = msg.id
return msg
|
(self)
|
40,152 |
filigran_sseclient.sseclient
|
_connect
| null |
def _connect(self):
if self.last_id:
self.requests_kwargs["headers"]["Last-Event-ID"] = self.last_id
# Use session if set. Otherwise fall back to requests module.
requester = self.session or requests
self.resp = requester.get(self.url, stream=True, **self.requests_kwargs)
transfer_encoding = self.resp.headers.get("transfer-encoding", "")
if transfer_encoding.lower() == "chunked":
self.resp_iterator = self.resp.iter_content(chunk_size=None)
else:
self.resp_iterator = self.iter_content()
encoding = self.resp.encoding or self.resp.apparent_encoding
self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
# TODO: Ensure we're handling redirects. Might also stick the 'origin'
# attribute on Events like the Javascript spec requires.
self.resp.raise_for_status()
|
(self)
|
40,153 |
filigran_sseclient.sseclient
|
_event_complete
| null |
def _event_complete(self):
return re.search(end_of_field, self.buf) is not None
|
(self)
|
40,154 |
filigran_sseclient.sseclient
|
iter_content
| null |
def iter_content(self):
def generate():
while True:
if (
hasattr(self.resp.raw, "_fp")
and hasattr(self.resp.raw._fp, "fp")
and hasattr(self.resp.raw._fp.fp, "read1")
):
chunk = self.resp.raw._fp.fp.read1(self.chunk_size)
else:
# _fp is not available, this means that we cannot use short
# reads and this will block until the full chunk size is
# actually read
chunk = self.resp.raw.read(self.chunk_size)
if not chunk:
break
yield chunk
return generate()
|
(self)
|
40,156 |
typesense.client
|
Client
| null |
class Client(object):
def __init__(self, config_dict):
self.config = Configuration(config_dict)
self.api_call = ApiCall(self.config)
self.collections = Collections(self.api_call)
self.multi_search = MultiSearch(self.api_call)
self.keys = Keys(self.api_call)
self.aliases = Aliases(self.api_call)
self.analytics = Analytics(self.api_call)
self.operations = Operations(self.api_call)
self.debug = Debug(self.api_call)
self.stopwords = Stopwords(self.api_call)
self.conversations_models = ConversationsModels(self.api_call)
|
(config_dict)
|
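A minimal construction sketch; the node address and API key are placeholders, and the config_dict keys follow the typesense client's Configuration conventions:
```python
import typesense

# placeholder node and API key; config_dict is handed to Configuration as-is
client = typesense.Client({
    "nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
    "api_key": "<api-key>",
    "connection_timeout_seconds": 2,
})
# the sub-clients created in __init__ are then reachable as attributes
print(client.collections.retrieve())
```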
40,157 |
typesense.client
|
__init__
| null |
def __init__(self, config_dict):
self.config = Configuration(config_dict)
self.api_call = ApiCall(self.config)
self.collections = Collections(self.api_call)
self.multi_search = MultiSearch(self.api_call)
self.keys = Keys(self.api_call)
self.aliases = Aliases(self.api_call)
self.analytics = Analytics(self.api_call)
self.operations = Operations(self.api_call)
self.debug = Debug(self.api_call)
self.stopwords = Stopwords(self.api_call)
self.conversations_models = ConversationsModels(self.api_call)
|
(self, config_dict)
|
40,187 |
flox.aggregations
|
Aggregation
| null |
class Aggregation:
def __init__(
self,
name,
*,
numpy: str | FuncTuple | None = None,
chunk: str | FuncTuple | None,
combine: str | FuncTuple | None,
preprocess: Callable | None = None,
finalize: Callable | None = None,
fill_value=None,
final_fill_value=dtypes.NA,
dtypes=None,
final_dtype: DTypeLike | None = None,
reduction_type: Literal["reduce", "argreduce"] = "reduce",
new_dims_func: Callable | None = None,
):
"""
Blueprint for computing grouped aggregations.
See aggregations.py for examples on how to specify reductions.
Attributes
----------
name : str
Name of reduction.
numpy : str or callable, optional
Reduction function applied to numpy inputs. This function should
compute the grouped reduction and must have a specific signature.
If string, these must be "native" reductions implemented by the backend
engines (numpy_groupies, flox, numbagg). If None, will be set to ``name``.
chunk : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Either a single function or a list of
functions to be applied blockwise on the input dask array. If None, will raise
an error for dask inputs.
combine : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Functions applied when combining intermediate
results from the blockwise stage (see ``chunk``). If None, will raise an error
for dask inputs.
finalize : callable
For dask inputs only. Function that combines intermediate results to compute
final result.
preprocess : callable
For dask inputs only. Preprocess inputs before ``chunk`` stage.
reduction_type : {"reduce", "argreduce"}
Type of reduction.
fill_value : number or tuple(number), optional
Value to use when a group has no members. If single value will be converted
to tuple of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_fill_value : optional
fill_value for final result.
dtypes : DType or tuple(DType), optional
dtypes for intermediate results. If single value, will be converted to a tuple
of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_dtype : DType, optional
DType for output. By default, uses dtype of array being reduced.
new_dims_func: Callable
Function that receives finalize_kwargs and returns a tuple of sizes of any new dimensions
added by the reduction. For example, quantile with q=(0.5, 0.85) adds a new dimension of size 2,
so returns (2,)
"""
self.name = name
# preprocess before blockwise
self.preprocess = preprocess
# Use "chunk_reduce" or "chunk_argreduce"
self.reduction_type = reduction_type
self.numpy: FuncTuple = (numpy,) if numpy else (self.name,)
# initialize blockwise reduction
self.chunk: OptionalFuncTuple = _atleast_1d(chunk)
# how to aggregate results after first round of reduction
self.combine: OptionalFuncTuple = _atleast_1d(combine)
# simpler reductions used with the "simple combine" algorithm
self.simple_combine: OptionalFuncTuple = ()
# finalize results (see mean)
self.finalize: Callable | None = finalize
self.fill_value = {}
# This is used for the final reindexing
self.fill_value[name] = final_fill_value
# Aggregation.fill_value is used to reindex to group labels
# at the *intermediate* step.
# They should make sense when aggregated together with results from other blocks
self.fill_value["intermediate"] = self._normalize_dtype_fill_value(fill_value, "fill_value")
self.dtype_init: AggDtypeInit = {
"final": final_dtype,
"intermediate": self._normalize_dtype_fill_value(dtypes, "dtype"),
}
self.dtype: AggDtype = None # type: ignore[assignment]
# The following are set by _initialize_aggregation
self.finalize_kwargs: dict[Any, Any] = {}
self.min_count: int = 0
self.new_dims_func: Callable = (
returns_empty_tuple if new_dims_func is None else new_dims_func
)
@cached_property
def new_dims(self) -> tuple[Dim]:
return self.new_dims_func(**self.finalize_kwargs)
@cached_property
def num_new_vector_dims(self) -> int:
return len(tuple(dim for dim in self.new_dims if not dim.is_scalar))
def _normalize_dtype_fill_value(self, value, name):
value = _atleast_1d(value)
if len(value) == 1 and len(value) < len(self.chunk):
value = value * len(self.chunk)
if len(value) != len(self.chunk):
raise ValueError(f"Bad {name} specified for Aggregation {self.name}.")
return value
def __dask_tokenize__(self):
return (
Aggregation,
self.name,
self.preprocess,
self.reduction_type,
self.numpy,
self.chunk,
self.combine,
self.finalize,
self.fill_value,
self.dtype,
)
def __repr__(self) -> str:
return "\n".join(
(
f"{self.name!r}, fill: {self.fill_value.values()!r}, dtype: {self.dtype}",
f"chunk: {self.chunk!r}",
f"combine: {self.combine!r}",
f"finalize: {self.finalize!r}",
f"min_count: {self.min_count!r}",
)
)
|
(name, *, numpy: 'str | FuncTuple | None' = None, chunk: 'str | FuncTuple | None', combine: 'str | FuncTuple | None', preprocess: 'Callable | None' = None, finalize: 'Callable | None' = None, fill_value=None, final_fill_value=<NA>, dtypes=None, final_dtype: 'DTypeLike | None' = None, reduction_type: "Literal['reduce', 'argreduce']" = 'reduce', new_dims_func: 'Callable | None' = None)
|
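As a rough illustration of the constructor arguments documented above, here is a sketch loosely modeled on a mean-style reduction: each block computes a sum and a count, both are summed across blocks, and the division happens at finalize time. The argument values are illustrative assumptions, not copied from flox's own definitions:
```python
import numpy as np
from flox.aggregations import Aggregation

# illustrative sketch of a mean-like aggregation; values are assumptions
my_mean = Aggregation(
    "my_mean",
    numpy="mean",                    # native numpy_groupies reduction for non-dask inputs
    chunk=("sum", "nanlen"),         # per-block: running sum and count
    combine=("sum", "sum"),          # combine blocks by summing both intermediates
    finalize=lambda sum_, count: sum_ / count,
    fill_value=(0, 0),               # intermediate values for empty groups
    final_fill_value=np.nan,         # final result for empty groups
    dtypes=(None, np.intp),
    final_dtype=np.floating,
)
```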
40,188 |
flox.aggregations
|
__dask_tokenize__
| null |
def __dask_tokenize__(self):
return (
Aggregation,
self.name,
self.preprocess,
self.reduction_type,
self.numpy,
self.chunk,
self.combine,
self.finalize,
self.fill_value,
self.dtype,
)
|
(self)
|
40,189 |
flox.aggregations
|
__init__
|
Blueprint for computing grouped aggregations.
See aggregations.py for examples on how to specify reductions.
Attributes
----------
name : str
Name of reduction.
numpy : str or callable, optional
Reduction function applied to numpy inputs. This function should
compute the grouped reduction and must have a specific signature.
If string, these must be "native" reductions implemented by the backend
engines (numpy_groupies, flox, numbagg). If None, will be set to ``name``.
chunk : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Either a single function or a list of
functions to be applied blockwise on the input dask array. If None, will raise
an error for dask inputs.
combine : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Functions applied when combining intermediate
results from the blockwise stage (see ``chunk``). If None, will raise an error
for dask inputs.
finalize : callable
For dask inputs only. Function that combines intermediate results to compute
final result.
preprocess : callable
For dask inputs only. Preprocess inputs before ``chunk`` stage.
reduction_type : {"reduce", "argreduce"}
Type of reduction.
fill_value : number or tuple(number), optional
Value to use when a group has no members. If single value will be converted
to tuple of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_fill_value : optional
fill_value for final result.
dtypes : DType or tuple(DType), optional
dtypes for intermediate results. If single value, will be converted to a tuple
of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_dtype : DType, optional
DType for output. By default, uses dtype of array being reduced.
new_dims_func: Callable
Function that receives finalize_kwargs and returns a tuple of sizes of any new dimensions
added by the reduction. For example, quantile with q=(0.5, 0.85) adds a new dimension of size 2,
so returns (2,)
|
def __init__(
self,
name,
*,
numpy: str | FuncTuple | None = None,
chunk: str | FuncTuple | None,
combine: str | FuncTuple | None,
preprocess: Callable | None = None,
finalize: Callable | None = None,
fill_value=None,
final_fill_value=dtypes.NA,
dtypes=None,
final_dtype: DTypeLike | None = None,
reduction_type: Literal["reduce", "argreduce"] = "reduce",
new_dims_func: Callable | None = None,
):
"""
Blueprint for computing grouped aggregations.
See aggregations.py for examples on how to specify reductions.
Attributes
----------
name : str
Name of reduction.
numpy : str or callable, optional
Reduction function applied to numpy inputs. This function should
compute the grouped reduction and must have a specific signature.
If string, these must be "native" reductions implemented by the backend
engines (numpy_groupies, flox, numbagg). If None, will be set to ``name``.
chunk : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Either a single function or a list of
functions to be applied blockwise on the input dask array. If None, will raise
an error for dask inputs.
combine : None or str or tuple of str or callable or tuple of callable
For dask inputs only. Functions applied when combining intermediate
results from the blockwise stage (see ``chunk``). If None, will raise an error
for dask inputs.
finalize : callable
For dask inputs only. Function that combines intermediate results to compute
final result.
preprocess : callable
For dask inputs only. Preprocess inputs before ``chunk`` stage.
reduction_type : {"reduce", "argreduce"}
Type of reduction.
fill_value : number or tuple(number), optional
Value to use when a group has no members. If single value will be converted
to tuple of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_fill_value : optional
fill_value for final result.
dtypes : DType or tuple(DType), optional
dtypes for intermediate results. If single value, will be converted to a tuple
of same length as chunk. If appropriate, provide a different fill_value
per reduction in ``chunk`` as a tuple.
final_dtype : DType, optional
DType for output. By default, uses dtype of array being reduced.
new_dims_func: Callable
Function that receives finalize_kwargs and returns a tuple of sizes of any new dimensions
added by the reduction. For example, quantile with q=(0.5, 0.85) adds a new dimension of size 2,
so returns (2,)
"""
self.name = name
# preprocess before blockwise
self.preprocess = preprocess
# Use "chunk_reduce" or "chunk_argreduce"
self.reduction_type = reduction_type
self.numpy: FuncTuple = (numpy,) if numpy else (self.name,)
# initialize blockwise reduction
self.chunk: OptionalFuncTuple = _atleast_1d(chunk)
# how to aggregate results after first round of reduction
self.combine: OptionalFuncTuple = _atleast_1d(combine)
# simpler reductions used with the "simple combine" algorithm
self.simple_combine: OptionalFuncTuple = ()
# finalize results (see mean)
self.finalize: Callable | None = finalize
self.fill_value = {}
# This is used for the final reindexing
self.fill_value[name] = final_fill_value
# Aggregation.fill_value is used to reindex to group labels
# at the *intermediate* step.
# They should make sense when aggregated together with results from other blocks
self.fill_value["intermediate"] = self._normalize_dtype_fill_value(fill_value, "fill_value")
self.dtype_init: AggDtypeInit = {
"final": final_dtype,
"intermediate": self._normalize_dtype_fill_value(dtypes, "dtype"),
}
self.dtype: AggDtype = None # type: ignore[assignment]
# The following are set by _initialize_aggregation
self.finalize_kwargs: dict[Any, Any] = {}
self.min_count: int = 0
self.new_dims_func: Callable = (
returns_empty_tuple if new_dims_func is None else new_dims_func
)
|
(self, name, *, numpy: 'str | FuncTuple | None' = None, chunk: 'str | FuncTuple | None', combine: 'str | FuncTuple | None', preprocess: 'Callable | None' = None, finalize: 'Callable | None' = None, fill_value=None, final_fill_value=<NA>, dtypes=None, final_dtype: 'DTypeLike | None' = None, reduction_type: "Literal['reduce', 'argreduce']" = 'reduce', new_dims_func: 'Callable | None' = None)
|
40,190 |
flox.aggregations
|
__repr__
| null |
def __repr__(self) -> str:
return "\n".join(
(
f"{self.name!r}, fill: {self.fill_value.values()!r}, dtype: {self.dtype}",
f"chunk: {self.chunk!r}",
f"combine: {self.combine!r}",
f"finalize: {self.finalize!r}",
f"min_count: {self.min_count!r}",
)
)
|
(self) -> str
|
40,191 |
flox.aggregations
|
_normalize_dtype_fill_value
| null |
def _normalize_dtype_fill_value(self, value, name):
value = _atleast_1d(value)
if len(value) == 1 and len(value) < len(self.chunk):
value = value * len(self.chunk)
if len(value) != len(self.chunk):
raise ValueError(f"Bad {name} specified for Aggregation {self.name}.")
return value
|
(self, value, name)
|
40,192 |
flox
|
_get_version
| null |
def _get_version():
__version__ = "999"
try:
from ._version import __version__
except ImportError:
pass
return __version__
|
()
|
40,199 |
flox.core
|
groupby_reduce
|
GroupBy reductions using tree reductions for dask.array
Parameters
----------
array : ndarray or DaskArray
Array to be reduced, possibly nD
*by : ndarray or DaskArray
Array of labels to group over. Must be aligned with ``array`` so that
``array.shape[-by.ndim :] == by.shape`` or any disagreements in that
equality check are for dimensions of size 1 in `by`.
func : {"all", "any", "count", "sum", "nansum", "mean", "nanmean", "max", "nanmax", "min", "nanmin", "argmax", "nanargmax", "argmin", "nanargmin", "quantile", "nanquantile", "median", "nanmedian", "mode", "nanmode", "first", "nanfirst", "last", "nanlast"} or Aggregation
Single function name or an Aggregation instance
expected_groups : (optional) Sequence
Expected unique labels.
isbin : bool, optional
Are ``expected_groups`` bin edges?
sort : bool, optional
Whether groups should be returned in sorted order. Only applies for dask
reductions when ``method`` is not ``"map-reduce"``. For ``"map-reduce"``, the groups
are always sorted.
axis : None or int or Sequence[int], optional
If None, reduce across all dimensions of by
Else, reduce across corresponding axes of array
Negative integers are normalized using array.ndim
fill_value : Any
Value to assign when a label in ``expected_groups`` is not present.
dtype : data-type , optional
DType for the output. Can be anything that is accepted by ``np.dtype``.
min_count : int, default: None
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype.
method : {"map-reduce", "blockwise", "cohorts"}, optional
Strategy for reduction of dask arrays only:
* ``"map-reduce"``:
First apply the reduction blockwise on ``array``, then
combine a few neighbouring blocks, apply the reduction.
Continue until finalizing. Usually, ``func`` will need
to be an Aggregation instance for this method to work.
Common aggregations are implemented.
* ``"blockwise"``:
Only reduce using blockwise and avoid aggregating blocks
together. Useful for resampling-style reductions where group
members are always together. If `by` is 1D, `array` is automatically
rechunked so that chunk boundaries line up with group boundaries
i.e. each block contains all members of any group present
in that block. For nD `by`, you must make sure that all members of a group
are present in a single block.
* ``"cohorts"``:
Finds group labels that tend to occur together ("cohorts"),
indexes out cohorts and reduces that subset using "map-reduce",
repeat for all cohorts. This works well for many time groupings
where the group labels repeat at regular intervals like 'hour',
'month', 'dayofyear', etc. Optimize chunking ``array`` for this
method by first rechunking using ``rechunk_for_cohorts``
(for 1D ``by`` only).
engine : {"flox", "numpy", "numba", "numbagg"}, optional
Algorithm to compute the groupby reduction on non-dask arrays and on each dask chunk:
* ``"numpy"``:
Use the vectorized implementations in ``numpy_groupies.aggregate_numpy``.
This is the default choice because it works for most array types.
* ``"flox"``:
Use an internal implementation where the data is sorted so that
all members of a group occur sequentially, and then numpy.ufunc.reduceat
is then used for the reduction. This will fall back to ``numpy_groupies.aggregate_numpy``
for a reduction that is not yet implemented.
* ``"numba"``:
Use the implementations in ``numpy_groupies.aggregate_numba``.
* ``"numbagg"``:
Use the reductions supported by ``numbagg.grouped``. This will fall back to ``numpy_groupies.aggregate_numpy``
for a reduction that is not yet implemented.
reindex : bool, optional
Whether to "reindex" the blockwise results to ``expected_groups`` (possibly automatically detected).
If True, the intermediate result of the blockwise groupby-reduction has a value for all expected groups,
and the final result is a simple reduction of those intermediates. In nearly all cases, this is a significant
boost in computation speed. For cases like time grouping, this may result in large intermediates relative to the
original block size. Avoid that by using ``method="cohorts"``. By default, it is turned off for argreductions.
finalize_kwargs : dict, optional
Kwargs passed to finalize the reduction such as ``ddof`` for var, std or ``q`` for quantile.
Returns
-------
result
Aggregated result
*groups
Group labels
See Also
--------
xarray.xarray_reduce
|
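Before the implementation below, a tiny end-to-end sketch on plain numpy arrays (no dask); the data are made up:
```python
import numpy as np
from flox.core import groupby_reduce

labels = np.array([0, 0, 1, 1, 1])
values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

# func is keyword-only; the grouped result and the group labels are returned
result, groups = groupby_reduce(values, labels, func="sum")
print(groups)  # [0 1]
print(result)  # [ 3. 12.]
```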
def groupby_reduce(
array: np.ndarray | DaskArray,
*by: T_By,
func: T_Agg,
expected_groups: T_ExpectedGroupsOpt = None,
sort: bool = True,
isbin: T_IsBins = False,
axis: T_AxesOpt = None,
fill_value=None,
dtype: np.typing.DTypeLike = None,
min_count: int | None = None,
method: T_MethodOpt = None,
engine: T_EngineOpt = None,
reindex: bool | None = None,
finalize_kwargs: dict[Any, Any] | None = None,
) -> tuple[DaskArray, Unpack[tuple[np.ndarray | DaskArray, ...]]]:
"""
GroupBy reductions using tree reductions for dask.array
Parameters
----------
array : ndarray or DaskArray
Array to be reduced, possibly nD
*by : ndarray or DaskArray
Array of labels to group over. Must be aligned with ``array`` so that
``array.shape[-by.ndim :] == by.shape`` or any disagreements in that
equality check are for dimensions of size 1 in `by`.
func : {"all", "any", "count", "sum", "nansum", "mean", "nanmean", \
"max", "nanmax", "min", "nanmin", "argmax", "nanargmax", "argmin", "nanargmin", \
"quantile", "nanquantile", "median", "nanmedian", "mode", "nanmode", \
"first", "nanfirst", "last", "nanlast"} or Aggregation
Single function name or an Aggregation instance
expected_groups : (optional) Sequence
Expected unique labels.
isbin : bool, optional
Are ``expected_groups`` bin edges?
sort : bool, optional
Whether groups should be returned in sorted order. Only applies for dask
reductions when ``method`` is not ``"map-reduce"``. For ``"map-reduce"``, the groups
are always sorted.
axis : None or int or Sequence[int], optional
If None, reduce across all dimensions of by
Else, reduce across corresponding axes of array
Negative integers are normalized using array.ndim
fill_value : Any
Value to assign when a label in ``expected_groups`` is not present.
dtype : data-type , optional
DType for the output. Can be anything that is accepted by ``np.dtype``.
min_count : int, default: None
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype.
method : {"map-reduce", "blockwise", "cohorts"}, optional
Strategy for reduction of dask arrays only:
* ``"map-reduce"``:
First apply the reduction blockwise on ``array``, then
combine a few neighbouring blocks, apply the reduction.
Continue until finalizing. Usually, ``func`` will need
to be an Aggregation instance for this method to work.
Common aggregations are implemented.
* ``"blockwise"``:
Only reduce using blockwise and avoid aggregating blocks
together. Useful for resampling-style reductions where group
members are always together. If `by` is 1D, `array` is automatically
rechunked so that chunk boundaries line up with group boundaries
i.e. each block contains all members of any group present
in that block. For nD `by`, you must make sure that all members of a group
are present in a single block.
* ``"cohorts"``:
Finds group labels that tend to occur together ("cohorts"),
indexes out cohorts and reduces that subset using "map-reduce",
repeat for all cohorts. This works well for many time groupings
where the group labels repeat at regular intervals like 'hour',
'month', 'dayofyear', etc. Optimize chunking ``array`` for this
method by first rechunking using ``rechunk_for_cohorts``
(for 1D ``by`` only).
engine : {"flox", "numpy", "numba", "numbagg"}, optional
Algorithm to compute the groupby reduction on non-dask arrays and on each dask chunk:
* ``"numpy"``:
Use the vectorized implementations in ``numpy_groupies.aggregate_numpy``.
This is the default choice because it works for most array types.
* ``"flox"``:
Use an internal implementation where the data is sorted so that
all members of a group occur sequentially, and then numpy.ufunc.reduceat
is then used for the reduction. This will fall back to ``numpy_groupies.aggregate_numpy``
for a reduction that is not yet implemented.
* ``"numba"``:
Use the implementations in ``numpy_groupies.aggregate_numba``.
* ``"numbagg"``:
Use the reductions supported by ``numbagg.grouped``. This will fall back to ``numpy_groupies.aggregate_numpy``
for a reduction that is not yet implemented.
reindex : bool, optional
Whether to "reindex" the blockwise results to ``expected_groups`` (possibly automatically detected).
If True, the intermediate result of the blockwise groupby-reduction has a value for all expected groups,
and the final result is a simple reduction of those intermediates. In nearly all cases, this is a significant
boost in computation speed. For cases like time grouping, this may result in large intermediates relative to the
original block size. Avoid that by using ``method="cohorts"``. By default, it is turned off for argreductions.
finalize_kwargs : dict, optional
Kwargs passed to finalize the reduction such as ``ddof`` for var, std or ``q`` for quantile.
Returns
-------
result
Aggregated result
*groups
Group labels
See Also
--------
xarray.xarray_reduce
"""
if engine == "flox" and _is_arg_reduction(func):
raise NotImplementedError(
"argreductions not supported for engine='flox' yet."
"Try engine='numpy' or engine='numba' instead."
)
if engine == "numbagg" and dtype is not None:
raise NotImplementedError(
"numbagg does not support the `dtype` kwarg. Either cast your "
"input arguments to `dtype` or use a different `engine`: "
"'flox' or 'numpy' or 'numba'. "
"See https://github.com/numbagg/numbagg/issues/121."
)
if func in ["quantile", "nanquantile"]:
if finalize_kwargs is None or "q" not in finalize_kwargs:
raise ValueError("Please pass `q` for quantile calculations.")
else:
nq = len(_atleast_1d(finalize_kwargs["q"]))
if nq > 1 and engine == "numpy":
raise ValueError(
"Multiple quantiles not supported with engine='numpy'."
"Use engine='flox' instead (it is also much faster), "
"or set engine=None to use the default."
)
bys: T_Bys = tuple(np.asarray(b) if not is_duck_array(b) else b for b in by)
nby = len(bys)
by_is_dask = tuple(is_duck_dask_array(b) for b in bys)
any_by_dask = any(by_is_dask)
provided_expected = expected_groups is not None
if (
engine == "numbagg"
and _is_arg_reduction(func)
and (any_by_dask or is_duck_dask_array(array))
):
# There is only one test that fails, but I can't figure
# out why without deep debugging.
# just disable for now.
# test_groupby_reduce_axis_subset_against_numpy
# for array is 3D dask, by is 3D dask, axis=2
# We are falling back to numpy for the arg reduction,
# so presumably something is going wrong
raise NotImplementedError(
"argreductions not supported for engine='numbagg' yet."
"Try engine='numpy' or engine='numba' instead."
)
if method == "cohorts" and any_by_dask:
raise ValueError(f"method={method!r} can only be used when grouping by numpy arrays.")
reindex = _validate_reindex(
reindex, func, method, expected_groups, any_by_dask, is_duck_dask_array(array)
)
if not is_duck_array(array):
array = np.asarray(array)
is_bool_array = np.issubdtype(array.dtype, bool)
array = array.astype(int) if is_bool_array else array
isbins = _atleast_1d(isbin, nby)
_assert_by_is_aligned(array.shape, bys)
expected_groups = _validate_expected_groups(nby, expected_groups)
for idx, (expect, is_dask) in enumerate(zip(expected_groups, by_is_dask)):
if is_dask and (reindex or nby > 1) and expect is None:
raise ValueError(
f"`expected_groups` for array {idx} in `by` cannot be None since it is a dask.array."
)
# We convert to pd.Index since that lets us know if we are binning or not
# (pd.IntervalIndex or not)
expected_groups = _convert_expected_groups_to_index(expected_groups, isbins, sort)
# Don't factorize early only when
# grouping by dask arrays, and not having expected_groups
factorize_early = not (
# can't do it if we are grouping by dask array but don't have expected_groups
any(is_dask and ex_ is None for is_dask, ex_ in zip(by_is_dask, expected_groups))
)
expected_: pd.RangeIndex | None
if factorize_early:
bys, final_groups, grp_shape = _factorize_multiple(
bys,
expected_groups,
any_by_dask=any_by_dask,
sort=sort,
)
expected_ = pd.RangeIndex(math.prod(grp_shape))
else:
assert expected_groups == (None,)
expected_ = None
assert len(bys) == 1
(by_,) = bys
if axis is None:
axis_ = tuple(array.ndim + np.arange(-by_.ndim, 0))
else:
axis_ = normalize_axis_tuple(axis, array.ndim)
nax = len(axis_)
has_dask = is_duck_dask_array(array) or is_duck_dask_array(by_)
has_cubed = is_duck_cubed_array(array) or is_duck_cubed_array(by_)
if _is_first_last_reduction(func):
if has_dask and nax != 1:
raise ValueError(
"For dask arrays: first, last, nanfirst, nanlast reductions are "
"only supported along a single axis. Please reshape appropriately."
)
elif nax not in [1, by_.ndim]:
raise ValueError(
"first, last, nanfirst, nanlast reductions are only supported "
"along a single axis or when reducing across all dimensions of `by`."
)
if nax == 1 and by_.ndim > 1 and expected_ is None:
# When we reduce along all axes, we are guaranteed to see all
# groups in the final combine stage, so everything works.
# This is not necessarily true when reducing along a subset of axes
# (of by)
# TODO: Does this depend on chunking of by?
# For e.g., we could relax this if there is only one chunk along all
# by dim != axis?
raise NotImplementedError(
"Please provide ``expected_groups`` when not reducing along all axes."
)
assert nax <= by_.ndim
if nax < by_.ndim:
by_ = _move_reduce_dims_to_end(by_, tuple(-array.ndim + ax + by_.ndim for ax in axis_))
array = _move_reduce_dims_to_end(array, axis_)
axis_ = tuple(array.ndim + np.arange(-nax, 0))
nax = len(axis_)
# When axis is a subset of possible values; then npg will
# apply the fill_value to groups that don't exist along a particular axis (for e.g.)
# since these count as a group that is absent.
# fill_value applies to all-NaN groups as well as labels in expected_groups that are not found.
# The only way to do this consistently is mask out using min_count
# Consider np.sum([np.nan]) = np.nan, np.nansum([np.nan]) = 0
if min_count is None:
if nax < by_.ndim or (fill_value is not None and provided_expected):
min_count_: int = 1
else:
min_count_ = 0
else:
min_count_ = min_count
# TODO: set in xarray?
if min_count_ > 0 and func in ["nansum", "nanprod"] and fill_value is None:
# nansum, nanprod have fill_value=0, 1
# overwrite that when min_count is set
fill_value = np.nan
kwargs = dict(axis=axis_, fill_value=fill_value)
agg = _initialize_aggregation(func, dtype, array.dtype, fill_value, min_count_, finalize_kwargs)
# Need to set this early using `agg`
# It cannot be done in the core loop of chunk_reduce
# since we "prepare" the data for flox.
kwargs["engine"] = _choose_engine(by_, agg) if engine is None else engine
groups: tuple[np.ndarray | DaskArray, ...]
if has_cubed:
if method is None:
method = "map-reduce"
if method != "map-reduce":
raise NotImplementedError(
"Reduction for Cubed arrays is only implemented for method 'map-reduce'."
)
partial_agg = partial(cubed_groupby_agg, **kwargs)
result, groups = partial_agg(
array,
by_,
expected_groups=expected_,
agg=agg,
reindex=reindex,
method=method,
sort=sort,
)
return (result, groups)
elif not has_dask:
results = _reduce_blockwise(
array, by_, agg, expected_groups=expected_, reindex=reindex, sort=sort, **kwargs
)
groups = (results["groups"],)
result = results[agg.name]
else:
if TYPE_CHECKING:
# TODO: How else to narrow that array.chunks is there?
assert isinstance(array, DaskArray)
if (not any_by_dask and method is None) or method == "cohorts":
preferred_method, chunks_cohorts = find_group_cohorts(
by_,
[array.chunks[ax] for ax in range(-by_.ndim, 0)],
expected_groups=expected_,
# when provided with cohorts, we *always* 'merge'
merge=(method == "cohorts"),
)
else:
preferred_method = "map-reduce"
chunks_cohorts = {}
method = _choose_method(method, preferred_method, agg, by_, nax)
if agg.chunk[0] is None and method != "blockwise":
raise NotImplementedError(
f"Aggregation {agg.name!r} is only implemented for dask arrays when method='blockwise'."
f"Received method={method!r}"
)
if (
_is_arg_reduction(agg)
and method == "blockwise"
and not all(nchunks == 1 for nchunks in array.numblocks[-nax:])
):
raise NotImplementedError(
"arg-reductions are not supported with method='blockwise', use 'cohorts' instead."
)
if nax != by_.ndim and method in ["blockwise", "cohorts"]:
raise NotImplementedError(
"Must reduce along all dimensions of `by` when method != 'map-reduce'."
f"Received method={method!r}"
)
# TODO: clean this up
reindex = _validate_reindex(
reindex, func, method, expected_, any_by_dask, is_duck_dask_array(array)
)
if TYPE_CHECKING:
assert method is not None
# TODO: just do this in dask_groupby_agg
# we always need some fill_value (see above) so choose the default if needed
if kwargs["fill_value"] is None:
kwargs["fill_value"] = agg.fill_value[agg.name]
partial_agg = partial(dask_groupby_agg, **kwargs)
if method == "blockwise" and by_.ndim == 1:
array = rechunk_for_blockwise(array, axis=-1, labels=by_)
result, groups = partial_agg(
array,
by_,
expected_groups=expected_,
agg=agg,
reindex=reindex,
method=method,
chunks_cohorts=chunks_cohorts,
sort=sort,
)
if sort and method != "map-reduce":
assert len(groups) == 1
sorted_idx = np.argsort(groups[0])
# This optimization helps specifically with resampling
if not _issorted(sorted_idx):
result = result[..., sorted_idx]
groups = (groups[0][sorted_idx],)
if factorize_early:
# nan group labels are factorized to -1, and preserved
# now we get rid of them by reindexing
# This also handles bins with no data
result = reindex_(result, from_=groups[0], to=expected_, fill_value=fill_value).reshape(
result.shape[:-1] + grp_shape
)
groups = final_groups
if is_bool_array and (_is_minmax_reduction(func) or _is_first_last_reduction(func)):
result = result.astype(bool)
return (result, *groups)
|
(array: 'np.ndarray | DaskArray', *by: 'T_By', func: 'T_Agg', expected_groups: 'T_ExpectedGroupsOpt' = None, sort: 'bool' = True, isbin: 'T_IsBins' = False, axis: 'T_AxesOpt' = None, fill_value=None, dtype: 'np.typing.DTypeLike' = None, min_count: 'int | None' = None, method: 'T_MethodOpt' = None, engine: 'T_EngineOpt' = None, reindex: 'bool | None' = None, finalize_kwargs: 'dict[Any, Any] | None' = None) -> 'tuple[DaskArray, Unpack[tuple[np.ndarray | DaskArray, ...]]]'
|
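A minimal usage sketch for flox.core.groupby_reduce (not from the package source; the toy array, labels and printed values are illustrative assumptions):
import numpy as np
from flox.core import groupby_reduce

# Five values grouped by three integer labels.
array = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
by = np.array([0, 0, 1, 1, 2])

# NaN-skipping sum per group; engine and method are chosen automatically when left as None.
result, groups = groupby_reduce(array, by, func="nansum")
print(groups)   # [0 1 2]
print(result)   # [3. 7. 5.]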
40,200 |
flox.core
|
rechunk_for_blockwise
|
Rechunks array so that group boundaries line up with chunk boundaries, allowing
embarrassingly parallel group reductions.
This only works when the groups are sequential
(e.g. labels = ``[0,0,0,1,1,1,1,2,2]``).
Such patterns occur when using ``.resample``.
Parameters
----------
array : DaskArray
Array to rechunk
axis : int
Axis along which to rechunk the array.
labels : np.ndarray
Group labels
Returns
-------
DaskArray
Rechunked array
|
def rechunk_for_blockwise(array: DaskArray, axis: T_Axis, labels: np.ndarray) -> DaskArray:
"""
Rechunks array so that group boundaries line up with chunk boundaries, allowing
embarrassingly parallel group reductions.
This only works when the groups are sequential
(e.g. labels = ``[0,0,0,1,1,1,1,2,2]``).
Such patterns occur when using ``.resample``.
Parameters
----------
array : DaskArray
Array to rechunk
axis : int
Axis along which to rechunk the array.
labels : np.ndarray
Group labels
Returns
-------
DaskArray
Rechunked array
"""
labels = factorize_((labels,), axes=())[0]
chunks = array.chunks[axis]
newchunks = _get_optimal_chunks_for_groups(chunks, labels)
if newchunks == chunks:
return array
else:
return array.rechunk({axis: newchunks})
|
(array: 'DaskArray', axis: 'T_Axis', labels: 'np.ndarray') -> 'DaskArray'
|
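A short sketch of the intended call pattern (assumes dask is installed; the label layout is invented and the resulting chunk sizes depend on the internal heuristic):
import numpy as np
import dask.array as da
from flox.core import rechunk_for_blockwise

# Sequential, resampling-style labels: groups of size 3, 4 and 2.
labels = np.array([0, 0, 0, 1, 1, 1, 1, 2, 2])
arr = da.ones(9, chunks=4)  # chunk edges do not line up with group edges

rechunked = rechunk_for_blockwise(arr, axis=0, labels=labels)
print(rechunked.chunks)  # new chunk edges follow the group boundaries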
40,201 |
flox.core
|
rechunk_for_cohorts
|
Rechunks array so that each new chunk contains groups that always occur together.
Parameters
----------
array : dask.array.Array
array to rechunk
axis : int
Axis to rechunk
labels : np.array
1D Group labels to align chunks with. This routine works
well when ``labels`` has repeating patterns: e.g.
``1, 2, 3, 1, 2, 3, 4, 1, 2, 3`` though there is no requirement
that the pattern must contain sequences.
force_new_chunk_at : Sequence
Labels at which we always start a new chunk. For
the example ``labels`` array, this would be `1`.
chunksize : int, optional
nominal chunk size. Chunk size is exceeded when the label
in ``force_new_chunk_at`` is less than ``chunksize//2`` elements away.
If None, uses median chunksize along axis.
Returns
-------
dask.array.Array
rechunked array
|
def rechunk_for_cohorts(
array: DaskArray,
axis: T_Axis,
labels: np.ndarray,
force_new_chunk_at: Sequence,
chunksize: int | None = None,
ignore_old_chunks: bool = False,
debug: bool = False,
) -> DaskArray:
"""
Rechunks array so that each new chunk contains groups that always occur together.
Parameters
----------
array : dask.array.Array
array to rechunk
axis : int
Axis to rechunk
labels : np.array
1D Group labels to align chunks with. This routine works
well when ``labels`` has repeating patterns: e.g.
``1, 2, 3, 1, 2, 3, 4, 1, 2, 3`` though there is no requirement
that the pattern must contain sequences.
force_new_chunk_at : Sequence
Labels at which we always start a new chunk. For
the example ``labels`` array, this would be `1`.
chunksize : int, optional
nominal chunk size. Chunk size is exceeded when the label
in ``force_new_chunk_at`` is less than ``chunksize//2`` elements away.
If None, uses median chunksize along axis.
Returns
-------
dask.array.Array
rechunked array
"""
if chunksize is None:
chunksize = np.median(array.chunks[axis]).astype(int)
if len(labels) != array.shape[axis]:
raise ValueError(
"labels must be equal to array.shape[axis]. "
f"Received length {len(labels)}. Expected length {array.shape[axis]}"
)
force_new_chunk_at = _atleast_1d(force_new_chunk_at)
oldchunks = array.chunks[axis]
oldbreaks = np.insert(np.cumsum(oldchunks), 0, 0)
if debug:
labels_at_breaks = labels[oldbreaks[:-1]]
print(labels_at_breaks[:40])
isbreak = np.isin(labels, force_new_chunk_at)
if not np.any(isbreak):
raise ValueError("One or more labels in ``force_new_chunk_at`` not present in ``labels``.")
divisions = []
counter = 1
for idx, lab in enumerate(labels):
if lab in force_new_chunk_at or idx == 0:
divisions.append(idx)
counter = 1
continue
next_break = np.nonzero(isbreak[idx:])[0]
if next_break.any():
next_break_is_close = next_break[0] <= chunksize // 2
else:
next_break_is_close = False
if (not ignore_old_chunks and idx in oldbreaks) or (
counter >= chunksize and not next_break_is_close
):
divisions.append(idx)
counter = 1
continue
counter += 1
divisions.append(len(labels))
if debug:
labels_at_breaks = labels[divisions[:-1]]
print(labels_at_breaks[:40])
newchunks = tuple(np.diff(divisions))
if debug:
print(divisions[:10], newchunks[:10])
print(divisions[-10:], newchunks[-10:])
assert sum(newchunks) == len(labels)
if newchunks == array.chunks[axis]:
return array
else:
return array.rechunk({axis: newchunks})
|
(array: 'DaskArray', axis: 'T_Axis', labels: 'np.ndarray', force_new_chunk_at: 'Sequence', chunksize: 'int | None' = None, ignore_old_chunks: 'bool' = False, debug: 'bool' = False) -> 'DaskArray'
|
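A hedged sketch of calling rechunk_for_cohorts on a repeating label pattern (the data, chunk size and force_new_chunk_at value are invented for illustration):
import numpy as np
import dask.array as da
from flox.core import rechunk_for_cohorts

# Labels repeat as 1, 2, 3, 4, ...; force every new chunk to start at label 1.
labels = np.tile([1, 2, 3, 4], 25)      # length 100
arr = da.random.random(100, chunks=17)

rechunked = rechunk_for_cohorts(arr, axis=0, labels=labels,
                                force_new_chunk_at=[1], chunksize=20)
print(rechunked.chunks)  # chunks now begin at occurrences of label 1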
40,204 |
cronsim.cronsim
|
CronSim
| null |
class CronSim(object):
LAST = -1000
LAST_WEEKDAY = -1001
def __init__(self, expr: str, dt: datetime):
self.dt = dt.replace(second=0, microsecond=0)
self.parts = expr.upper().split()
if len(self.parts) != 5:
raise CronSimError("Wrong number of fields")
# In Debian cron, if either the day-of-month or the day-of-week field
# starts with a star, then there is an "AND" relationship between them.
# Otherwise it's "OR".
self.day_and = self.parts[2].startswith("*") or self.parts[4].startswith("*")
self.minutes = cast(Set[int], Field.MINUTE.parse(self.parts[0]))
self.hours = cast(Set[int], Field.HOUR.parse(self.parts[1]))
self.days = cast(Set[int], Field.DAY.parse(self.parts[2]))
self.months = cast(Set[int], Field.MONTH.parse(self.parts[3]))
self.weekdays = Field.DOW.parse(self.parts[4])
if len(self.days) and min(self.days) > 29:
# Check if we have any month with enough days
if min(self.days) > max(DAYS_IN_MONTH[month] for month in self.months):
raise CronSimError(Field.DAY.msg())
self.fixup_tz = None
if self.dt.tzinfo in (None, UTC):
# No special DST handling for UTC
pass
else:
if not self.parts[0].startswith("*") and not self.parts[1].startswith("*"):
# Will use special handling for jobs that run at specific time, or
# with a granularity greater than one hour (to mimic Debian cron).
self.fixup_tz = self.dt.tzinfo
self.dt = self.dt.replace(tzinfo=None)
def tick(self, minutes: int = 1) -> None:
"""Roll self.dt forward by 1 or more minutes and fix timezone."""
if self.dt.tzinfo not in (None, UTC):
as_utc = self.dt.astimezone(UTC)
as_utc += td(minutes=minutes)
self.dt = as_utc.astimezone(self.dt.tzinfo)
else:
self.dt += td(minutes=minutes)
def advance_minute(self) -> bool:
"""Roll forward the minute component until it satisfies the constraints.
Return False if the minute meets constraints without modification.
Return True if self.dt was rolled forward.
"""
if self.dt.minute in self.minutes:
return False
if len(self.minutes) == 1:
# An optimization for the special case where self.minutes has exactly
# one element. Instead of advancing one minute per iteration,
# make a jump from the current minute to the target minute.
delta = (next(iter(self.minutes)) - self.dt.minute) % 60
self.tick(minutes=delta)
while self.dt.minute not in self.minutes:
self.tick()
if self.dt.minute == 0:
# Break out to re-check month, day and hour
break
return True
def advance_hour(self) -> bool:
"""Roll forward the hour component until it satisfies the constraints.
Return False if the hour meets constraints without modification.
Return True if self.dt was rolled forward.
"""
if self.dt.hour in self.hours:
return False
self.dt = self.dt.replace(minute=0)
while self.dt.hour not in self.hours:
self.tick(minutes=60)
if self.dt.hour == 0:
# break out to re-check month and day
break
return True
def match_dom(self, d: date) -> bool:
"""Return True is day-of-month matches."""
if d.day in self.days:
return True
# Optimization: there are no months with fewer than 28 days.
# If 28th is Sunday, the last weekday of the month is the 26th.
# Any date before 26th cannot be the last weekday of the month.
if self.LAST_WEEKDAY in self.days and d.day >= 26:
if d.day == last_weekday(d.year, d.month):
return True
# Optimization: there are no months with fewer than 28 days,
# so any date before 28th cannot be the last day of the month
if self.LAST in self.days and d.day >= 28:
_, last = calendar.monthrange(d.year, d.month)
if d.day == last:
return True
return False
def match_dow(self, d: date) -> bool:
"""Return True is day-of-week matches."""
dow = d.weekday() + 1
if dow in self.weekdays or dow % 7 in self.weekdays:
return True
if (dow, self.LAST) in self.weekdays or (dow % 7, self.LAST) in self.weekdays:
_, last = calendar.monthrange(d.year, d.month)
if d.day + 7 > last:
# Same day next week would be outside this month.
# So this is the last one this month.
return True
idx = (d.day + 6) // 7
if (dow, idx) in self.weekdays or (dow % 7, idx) in self.weekdays:
return True
return False
def match_day(self, d: date) -> bool:
if self.day_and:
return self.match_dom(d) and self.match_dow(d)
return self.match_dom(d) or self.match_dow(d)
def advance_day(self) -> bool:
"""Roll forward the day component until it satisfies the constraints.
This method advances the date until it matches either the
day-of-month, or the day-of-week constraint.
Return False if the day meets constraints without modification.
Return True if self.dt was rolled forward.
"""
needle = self.dt.date()
if self.match_day(needle):
return False
while not self.match_day(needle):
needle += td(days=1)
if needle.day == 1:
# We're in a different month now, break out to re-check month
# This significantly speeds up the "0 0 * 2 MON#5" case
break
self.dt = datetime.combine(needle, time(), tzinfo=self.dt.tzinfo)
return True
def advance_month(self) -> None:
"""Roll forward the month component until it satisfies the constraints."""
if self.dt.month in self.months:
return
needle = self.dt.date()
while needle.month not in self.months:
needle = (needle.replace(day=1) + td(days=32)).replace(day=1)
self.dt = datetime.combine(needle, time(), tzinfo=self.dt.tzinfo)
def __iter__(self) -> "CronSim":
return self
def __next__(self) -> datetime:
self.tick()
start_year = self.dt.year
while True:
self.advance_month()
if self.dt.year > start_year + 50:
# Give up if there is no match for 50 years.
# It would be nice to detect "this will never yield any results"
# situations in a more intelligent way.
raise StopIteration
if self.advance_day():
continue
if self.advance_hour():
continue
if self.advance_minute():
continue
# If all constraints are satisfied then we have the result.
# The last step is to check if we need to fix up an imaginary
# or ambiguous date.
if self.fixup_tz:
result = self.dt.replace(tzinfo=self.fixup_tz, fold=0)
while is_imaginary(result):
self.dt += td(minutes=1)
result = self.dt.replace(tzinfo=self.fixup_tz)
return result
return self.dt
def explain(self) -> str:
from cronsim.explain import Expression
return Expression(self.parts).explain()
|
(expr: 'str', dt: 'datetime')
|
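A small usage sketch for CronSim (not from the package source; the cron expression and start time are arbitrary examples):
from datetime import datetime, timezone
from cronsim.cronsim import CronSim

# Iterate the next three occurrences of "every Monday at 09:30".
it = CronSim("30 9 * * 1", datetime(2024, 1, 1, tzinfo=timezone.utc))
for _ in range(3):
    print(next(it))   # 2024-01-01 09:30 UTC, then the following two Mondays

# explain() renders the expression as human-readable text.
print(CronSim("30 9 * * 1", datetime(2024, 1, 1)).explain())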
40,205 |
cronsim.cronsim
|
__init__
| null |
def __init__(self, expr: str, dt: datetime):
self.dt = dt.replace(second=0, microsecond=0)
self.parts = expr.upper().split()
if len(self.parts) != 5:
raise CronSimError("Wrong number of fields")
# In Debian cron, if either the day-of-month or the day-of-week field
# starts with a star, then there is an "AND" relationship between them.
# Otherwise it's "OR".
self.day_and = self.parts[2].startswith("*") or self.parts[4].startswith("*")
self.minutes = cast(Set[int], Field.MINUTE.parse(self.parts[0]))
self.hours = cast(Set[int], Field.HOUR.parse(self.parts[1]))
self.days = cast(Set[int], Field.DAY.parse(self.parts[2]))
self.months = cast(Set[int], Field.MONTH.parse(self.parts[3]))
self.weekdays = Field.DOW.parse(self.parts[4])
if len(self.days) and min(self.days) > 29:
# Check if we have any month with enough days
if min(self.days) > max(DAYS_IN_MONTH[month] for month in self.months):
raise CronSimError(Field.DAY.msg())
self.fixup_tz = None
if self.dt.tzinfo in (None, UTC):
# No special DST handling for UTC
pass
else:
if not self.parts[0].startswith("*") and not self.parts[1].startswith("*"):
# Will use special handling for jobs that run at specific time, or
# with a granularity greater than one hour (to mimic Debian cron).
self.fixup_tz = self.dt.tzinfo
self.dt = self.dt.replace(tzinfo=None)
|
(self, expr: str, dt: datetime.datetime)
|
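The Debian-style DOM/DOW rule described in the comment can be checked directly on the parsed object; a quick sketch (the expressions below are illustrative):
from datetime import datetime
from cronsim.cronsim import CronSim

now = datetime(2024, 1, 1)
# Both day fields restricted -> OR relationship between them (day_and is False).
print(CronSim("0 0 1 * MON", now).day_and)   # False
# Day-of-month field starts with "*" -> AND relationship (day_and is True).
print(CronSim("0 0 * * MON", now).day_and)   # True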
40,206 |
cronsim.cronsim
|
__iter__
| null |
def __iter__(self) -> "CronSim":
return self
|
(self) -> cronsim.cronsim.CronSim
|
40,207 |
cronsim.cronsim
|
__next__
| null |
def __next__(self) -> datetime:
self.tick()
start_year = self.dt.year
while True:
self.advance_month()
if self.dt.year > start_year + 50:
# Give up if there is no match for 50 years.
# It would be nice to detect "this will never yield any results"
# situations in a more intelligent way.
raise StopIteration
if self.advance_day():
continue
if self.advance_hour():
continue
if self.advance_minute():
continue
# If all constraints are satisfied then we have the result.
# The last step is to check if we need to fix up an imaginary
# or ambiguous date.
if self.fixup_tz:
result = self.dt.replace(tzinfo=self.fixup_tz, fold=0)
while is_imaginary(result):
self.dt += td(minutes=1)
result = self.dt.replace(tzinfo=self.fixup_tz)
return result
return self.dt
|
(self) -> datetime.datetime
|
40,208 |
cronsim.cronsim
|
advance_day
|
Roll forward the day component until it satisfies the constraints.
This method advances the date until it matches either the
day-of-month, or the day-of-week constraint.
Return False if the day meets constraints without modification.
Return True if self.dt was rolled forward.
|
def advance_day(self) -> bool:
"""Roll forward the day component until it satisfies the constraints.
This method advances the date until it matches either the
day-of-month, or the day-of-week constraint.
Return False if the day meets constraints without modification.
Return True if self.dt was rolled forward.
"""
needle = self.dt.date()
if self.match_day(needle):
return False
while not self.match_day(needle):
needle += td(days=1)
if needle.day == 1:
# We're in a different month now, break out to re-check month
# This significantly speeds up the "0 0 * 2 MON#5" case
break
self.dt = datetime.combine(needle, time(), tzinfo=self.dt.tzinfo)
return True
|
(self) -> bool
|
40,209 |
cronsim.cronsim
|
advance_hour
|
Roll forward the hour component until it satisfies the constraints.
Return False if the hour meets constraints without modification.
Return True if self.dt was rolled forward.
|
def advance_hour(self) -> bool:
"""Roll forward the hour component until it satisfies the constraints.
Return False if the hour meets constraints without modification.
Return True if self.dt was rolled forward.
"""
if self.dt.hour in self.hours:
return False
self.dt = self.dt.replace(minute=0)
while self.dt.hour not in self.hours:
self.tick(minutes=60)
if self.dt.hour == 0:
# break out to re-check month and day
break
return True
|
(self) -> bool
|
40,210 |
cronsim.cronsim
|
advance_minute
|
Roll forward the minute component until it satisfies the constraints.
Return False if the minute meets constraints without modification.
Return True if self.dt was rolled forward.
|
def advance_minute(self) -> bool:
"""Roll forward the minute component until it satisfies the constraints.
Return False if the minute meets constraints without modification.
Return True if self.dt was rolled forward.
"""
if self.dt.minute in self.minutes:
return False
if len(self.minutes) == 1:
# An optimization for the special case where self.minutes has exactly
# one element. Instead of advancing one minute per iteration,
# make a jump from the current minute to the target minute.
delta = (next(iter(self.minutes)) - self.dt.minute) % 60
self.tick(minutes=delta)
while self.dt.minute not in self.minutes:
self.tick()
if self.dt.minute == 0:
# Break out to re-check month, day and hour
break
return True
|
(self) -> bool
|
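The single-target jump in the optimization above is plain modular arithmetic on minutes; a standalone illustration (the sample minute values are arbitrary):
# Minutes to jump from the current minute to one fixed target minute, wrapping at 60.
for current, target in [(10, 45), (50, 5)]:
    print(current, target, (target - current) % 60)
# 10 45 35
# 50 5 15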
40,211 |
cronsim.cronsim
|
advance_month
|
Roll forward the month component until it satisfies the constraints.
|
def advance_month(self) -> None:
"""Roll forward the month component until it satisfies the constraints."""
if self.dt.month in self.months:
return
needle = self.dt.date()
while needle.month not in self.months:
needle = (needle.replace(day=1) + td(days=32)).replace(day=1)
self.dt = datetime.combine(needle, time(), tzinfo=self.dt.tzinfo)
|
(self) -> NoneType
|
40,212 |
cronsim.cronsim
|
explain
| null |
def explain(self) -> str:
from cronsim.explain import Expression
return Expression(self.parts).explain()
|
(self) -> str
|
40,213 |
cronsim.cronsim
|
match_day
| null |
def match_day(self, d: date) -> bool:
if self.day_and:
return self.match_dom(d) and self.match_dow(d)
return self.match_dom(d) or self.match_dow(d)
|
(self, d: datetime.date) -> bool
|
40,214 |
cronsim.cronsim
|
match_dom
|
Return True if day-of-month matches.
|
def match_dom(self, d: date) -> bool:
"""Return True is day-of-month matches."""
if d.day in self.days:
return True
# Optimization: there are no months with fewer than 28 days.
# If 28th is Sunday, the last weekday of the month is the 26th.
# Any date before 26th cannot be the last weekday of the month.
if self.LAST_WEEKDAY in self.days and d.day >= 26:
if d.day == last_weekday(d.year, d.month):
return True
# Optimization: there are no months with fewer than 28 days,
# so any date before 28th cannot be the last day of the month
if self.LAST in self.days and d.day >= 28:
_, last = calendar.monthrange(d.year, d.month)
if d.day == last:
return True
return False
|
(self, d: datetime.date) -> bool
|
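The last-day branch leans on calendar.monthrange; a standalone illustration (the dates are chosen arbitrarily):
import calendar
from datetime import date

# monthrange returns (weekday of the 1st, number of days in the month);
# the second element is what the last-day check compares against.
_, last = calendar.monthrange(2024, 2)
print(last)                            # 29 (leap year)
print(date(2024, 2, 29).day == last)   # True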
40,215 |
cronsim.cronsim
|
match_dow
|
Return True if day-of-week matches.
|
def match_dow(self, d: date) -> bool:
"""Return True is day-of-week matches."""
dow = d.weekday() + 1
if dow in self.weekdays or dow % 7 in self.weekdays:
return True
if (dow, self.LAST) in self.weekdays or (dow % 7, self.LAST) in self.weekdays:
_, last = calendar.monthrange(d.year, d.month)
if d.day + 7 > last:
# Same day next week would be outside this month.
# So this is the last one this month.
return True
idx = (d.day + 6) // 7
if (dow, idx) in self.weekdays or (dow % 7, idx) in self.weekdays:
return True
return False
|
(self, d: datetime.date) -> bool
|
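The week-of-month index used for the nth-weekday check is (day + 6) // 7; a quick check of the formula (the sample days are illustrative):
# Days 1-7 fall in week 1, days 8-14 in week 2, and so on.
for day in (1, 7, 8, 14, 15, 29):
    print(day, (day + 6) // 7)
# -> 1, 1, 2, 2, 3, 5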
40,216 |
cronsim.cronsim
|
tick
|
Roll self.dt forward by 1 or more minutes and fix timezone.
|
def tick(self, minutes: int = 1) -> None:
"""Roll self.dt forward by 1 or more minutes and fix timezone."""
if self.dt.tzinfo not in (None, UTC):
as_utc = self.dt.astimezone(UTC)
as_utc += td(minutes=minutes)
self.dt = as_utc.astimezone(self.dt.tzinfo)
else:
self.dt += td(minutes=minutes)
|
(self, minutes: int = 1) -> NoneType
|
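tick() advances zone-aware datetimes through UTC so that DST gaps are resolved by the zone conversion; a standalone sketch of the same round-trip (Europe/Riga and the date are assumptions chosen for illustration):
from datetime import datetime, timedelta
from zoneinfo import ZoneInfo

tz = ZoneInfo("Europe/Riga")
dt = datetime(2024, 3, 31, 2, 30, tzinfo=tz)       # 30 minutes before the spring-forward gap
as_utc = dt.astimezone(ZoneInfo("UTC")) + timedelta(minutes=60)
print(as_utc.astimezone(tz))                       # 2024-03-31 04:30+03:00, the 03:xx hour is skipped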
40,217 |
cronsim.cronsim
|
CronSimError
| null |
class CronSimError(Exception):
pass
| null |
40,219 |
trio_websocket._impl
|
CloseReason
|
Contains information about why a WebSocket was closed.
|
class CloseReason:
''' Contains information about why a WebSocket was closed. '''
def __init__(self, code, reason):
'''
Constructor.
:param int code:
:param Optional[str] reason:
'''
self._code = code
try:
self._name = wsframeproto.CloseReason(code).name
except ValueError:
if 1000 <= code <= 2999:
self._name = 'RFC_RESERVED'
elif 3000 <= code <= 3999:
self._name = 'IANA_RESERVED'
elif 4000 <= code <= 4999:
self._name = 'PRIVATE_RESERVED'
else:
self._name = 'INVALID_CODE'
self._reason = reason
@property
def code(self):
''' (Read-only) The numeric close code. '''
return self._code
@property
def name(self):
''' (Read-only) The human-readable close code. '''
return self._name
@property
def reason(self):
''' (Read-only) An arbitrary reason string. '''
return self._reason
def __repr__(self):
''' Show close code, name, and reason. '''
return f'{self.__class__.__name__}' \
f'<code={self.code}, name={self.name}, reason={self.reason}>'
|
(code, reason)
|
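A hedged usage sketch (importing from trio_websocket._impl as listed above; the codes and reason strings are arbitrary examples):
from trio_websocket._impl import CloseReason

normal = CloseReason(1000, "bye")
print(normal.code, normal.name, normal.reason)   # 1000 NORMAL_CLOSURE bye

# Codes in the 4000-4999 range map to the PRIVATE_RESERVED bucket.
print(CloseReason(4001, None))   # CloseReason<code=4001, name=PRIVATE_RESERVED, reason=None>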