Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/_version.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/rrule.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/__init__.py +61 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/_parser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/isoparser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/_parser.py +1613 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/parser/isoparser.py +416 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__init__.py +167 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/rebuild.py +75 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__init__.py +69 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/_version.py +21 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/archive.py +73 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/caching.py +875 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/compression.py +174 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/config.py +131 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/conftest.py +55 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/core.py +710 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/dircache.py +98 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/exceptions.py +17 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/fuse.py +324 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/generic.py +403 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/gui.py +414 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/data.py +48 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dirfs.py +364 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/ftp.py +385 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/http.py +868 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/jupyter.py +124 -0
- env-llmeval/lib/python3.10/site-packages/fsspec/implementations/memory.py +292 -0
env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/_version.cpython-310.pyc
ADDED
Binary file (274 Bytes)

env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc
ADDED
Binary file (2.2 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc
ADDED
Binary file (15.7 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/rrule.cpython-310.pyc
ADDED
Binary file (43.3 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc
ADDED
Binary file (193 Bytes)

env-llmeval/lib/python3.10/site-packages/dateutil/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (2.26 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/parser/__init__.py
ADDED
@@ -0,0 +1,61 @@
```python
# -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo, ParserError
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning

from ._parser import __doc__

from .isoparser import isoparser, isoparse

__all__ = ['parse', 'parser', 'parserinfo',
           'isoparse', 'isoparser',
           'ParserError',
           'UnknownTimezoneWarning']


###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.


def __deprecated_private_func(f):
    from functools import wraps
    import warnings

    msg = ('{name} is a private function and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=f.__name__)

    @wraps(f)
    def deprecated_func(*args, **kwargs):
        warnings.warn(msg, DeprecationWarning)
        return f(*args, **kwargs)

    return deprecated_func

def __deprecate_private_class(c):
    import warnings

    msg = ('{name} is a private class and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=c.__name__)

    class private_class(c):
        __doc__ = c.__doc__

        def __init__(self, *args, **kwargs):
            warnings.warn(msg, DeprecationWarning)
            super(private_class, self).__init__(*args, **kwargs)

    private_class.__name__ = c.__name__

    return private_class


from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz

_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
```
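
For orientation, a minimal usage sketch of the public API that this `__init__` re-exports. It uses only names visible in the imports above (`parse` and `isoparse`); the printed values are illustrative and assume a standard dateutil build consistent with this vendored copy.

```python
# Minimal usage sketch of the API re-exported by dateutil.parser above.
from dateutil.parser import parse, isoparse

# Generic parsing: the format is inferred from the string itself.
dt = parse("2012-01-19 17:21:00")
print(dt)   # 2012-01-19 17:21:00

# Strict ISO-8601 parsing via the isoparser re-export.
iso = isoparse("2012-01-19T17:21:00+00:00")
print(iso)  # 2012-01-19 17:21:00+00:00
```
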
env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.07 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/_parser.cpython-310.pyc
ADDED
Binary file (40.5 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/parser/__pycache__/isoparser.cpython-310.pyc
ADDED
Binary file (11.3 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/parser/_parser.py
ADDED
@@ -0,0 +1,1613 @@
```python
# -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.

This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:

- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
  on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
  specified.
- If a time zone is omitted, a timezone-naive datetime is returned.

If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.

Additional resources about date/time string formats can be found below:

- `A summary of the international standard date and time notation
  <https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- `CPAN ParseDate module
  <https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
  <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals

import datetime
import re
import string
import time
import warnings

from calendar import monthrange
from io import StringIO

import six
from six import integer_types, text_type

from decimal import Decimal

from warnings import warn

from .. import relativedelta
from .. import tz

__all__ = ["parse", "parserinfo", "ParserError"]


# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
    # Fractional seconds are sometimes split by a comma
    _split_decimal = re.compile("([.,])")

    def __init__(self, instream):
        if isinstance(instream, (bytes, bytearray)):
            instream = instream.decode()

        if isinstance(instream, text_type):
            instream = StringIO(instream)
        elif getattr(instream, 'read', None) is None:
            raise TypeError('Parser must be a string or character stream, not '
                            '{itype}'.format(itype=instream.__class__.__name__))

        self.instream = instream
        self.charstack = []
        self.tokenstack = []
        self.eof = False

    def get_token(self):
        """
        This function breaks the time string into lexical units (tokens), which
        can be parsed by the parser. Lexical units are demarcated by changes in
        the character set, so any continuous string of letters is considered
        one unit, any continuous string of numbers is considered one unit.

        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking it into tokens; as such, this
        function maintains a "token stack", for when the ambiguous context
        demands that multiple tokens be parsed at once.
        """
        if self.tokenstack:
            return self.tokenstack.pop(0)

        seenletters = False
        token = None
        state = None

        while not self.eof:
            # We only realize that we've reached the end of a token when we
            # find a character that's not part of the current token - since
            # that character may be part of the next token, it's stored in the
            # charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)

            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character of the token - determines if we're starting
                # to parse a word, a number or something else.
                token = nextchar
                if self.isword(nextchar):
                    state = 'a'
                elif self.isnum(nextchar):
                    state = '0'
                elif self.isspace(nextchar):
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                # If we've already started reading a word, we keep reading
                # letters until we find something that's not part of a word.
                seenletters = True
                if self.isword(nextchar):
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                # If we've already started reading a number, we keep reading
                # numbers until we find something that doesn't fit.
                if self.isnum(nextchar):
                    token += nextchar
                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                # If we've seen some letters and a dot separator, continue
                # parsing, and the tokens will be broken up later.
                seenletters = True
                if nextchar == '.' or self.isword(nextchar):
                    token += nextchar
                elif self.isnum(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                # If we've seen at least one dot separator, keep going, we'll
                # break up the tokens later.
                if nextchar == '.' or self.isnum(nextchar):
                    token += nextchar
                elif self.isword(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token

        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                       token[-1] in '.,')):
            l = self._split_decimal.split(token)
            token = l[0]
            for tok in l[1:]:
                if tok:
                    self.tokenstack.append(tok)

        if state == '0.' and token.count('.') == 0:
            token = token.replace(',', '.')

        return token

    def __iter__(self):
        return self

    def __next__(self):
        token = self.get_token()
        if token is None:
            raise StopIteration

        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    @classmethod
    def split(cls, s):
        return list(cls(s))

    @classmethod
    def isword(cls, nextchar):
        """ Whether or not the next character is part of a word """
        return nextchar.isalpha()

    @classmethod
    def isnum(cls, nextchar):
        """ Whether the next character is part of a number """
        return nextchar.isdigit()

    @classmethod
    def isspace(cls, nextchar):
        """ Whether the next character is whitespace """
        return nextchar.isspace()


class _resultbase(object):

    def __init__(self):
        for attr in self.__slots__:
            setattr(self, attr, None)

    def _repr(self, classname):
        l = []
        for attr in self.__slots__:
            value = getattr(self, attr)
            if value is not None:
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (classname, ", ".join(l))

    def __len__(self):
        return (sum(getattr(self, attr) is not None
                    for attr in self.__slots__))

    def __repr__(self):
        return self._repr(self.__class__.__name__)


class parserinfo(object):
    """
    Class which handles what inputs are accepted. Subclass this to customize
    the language and acceptable values for each parameter.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. Default is ``False``.

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        Default is ``False``.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]

    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),     # TODO: "Tues"
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),    # TODO: "Thurs"
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),      # TODO: "Febr"
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z", "z"]
    PERTAIN = ["of"]
    TZOFFSET = {}
    # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
    #              "Anno Domini", "Year of Our Lord"]

    def __init__(self, dayfirst=False, yearfirst=False):
        self._jump = self._convert(self.JUMP)
        self._weekdays = self._convert(self.WEEKDAYS)
        self._months = self._convert(self.MONTHS)
        self._hms = self._convert(self.HMS)
        self._ampm = self._convert(self.AMPM)
        self._utczone = self._convert(self.UTCZONE)
        self._pertain = self._convert(self.PERTAIN)

        self.dayfirst = dayfirst
        self.yearfirst = yearfirst

        self._year = time.localtime().tm_year
        self._century = self._year // 100 * 100

    def _convert(self, lst):
        dct = {}
        for i, v in enumerate(lst):
            if isinstance(v, tuple):
                for v in v:
                    dct[v.lower()] = i
            else:
                dct[v.lower()] = i
        return dct

    def jump(self, name):
        return name.lower() in self._jump

    def weekday(self, name):
        try:
            return self._weekdays[name.lower()]
        except KeyError:
            pass
        return None

    def month(self, name):
        try:
            return self._months[name.lower()] + 1
        except KeyError:
            pass
        return None

    def hms(self, name):
        try:
            return self._hms[name.lower()]
        except KeyError:
            return None

    def ampm(self, name):
        try:
            return self._ampm[name.lower()]
        except KeyError:
            return None

    def pertain(self, name):
        return name.lower() in self._pertain

    def utczone(self, name):
        return name.lower() in self._utczone

    def tzoffset(self, name):
        if name in self._utczone:
            return 0

        return self.TZOFFSET.get(name)

    def convertyear(self, year, century_specified=False):
        """
        Converts two-digit years to year within [-50, 49]
        range of self._year (current local time)
        """

        # Function contract is that the year is always positive
        assert year >= 0

        if year < 100 and not century_specified:
            # assume current century to start
            year += self._century

            if year >= self._year + 50:  # if too far in future
                year -= 100
            elif year < self._year - 50:  # if too far in past
                year += 100

        return year

    def validate(self, res):
        # move to info
        if res.year is not None:
            res.year = self.convertyear(res.year, res.century_specified)

        if ((res.tzoffset == 0 and not res.tzname) or
                (res.tzname == 'Z' or res.tzname == 'z')):
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            res.tzoffset = 0
        return True


class _ymd(list):
    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.century_specified = False
        self.dstridx = None
        self.mstridx = None
        self.ystridx = None

    @property
    def has_year(self):
        return self.ystridx is not None

    @property
    def has_month(self):
        return self.mstridx is not None

    @property
    def has_day(self):
        return self.dstridx is not None

    def could_be_day(self, value):
        if self.has_day:
            return False
        elif not self.has_month:
            return 1 <= value <= 31
        elif not self.has_year:
            # Be permissive, assume leap year
            month = self[self.mstridx]
            return 1 <= value <= monthrange(2000, month)[1]
        else:
            month = self[self.mstridx]
            year = self[self.ystridx]
            return 1 <= value <= monthrange(year, month)[1]

    def append(self, val, label=None):
        if hasattr(val, '__len__'):
            if val.isdigit() and len(val) > 2:
                self.century_specified = True
                if label not in [None, 'Y']:  # pragma: no cover
                    raise ValueError(label)
                label = 'Y'
        elif val > 100:
            self.century_specified = True
            if label not in [None, 'Y']:  # pragma: no cover
                raise ValueError(label)
            label = 'Y'

        super(self.__class__, self).append(int(val))

        if label == 'M':
            if self.has_month:
                raise ValueError('Month is already set')
            self.mstridx = len(self) - 1
        elif label == 'D':
            if self.has_day:
                raise ValueError('Day is already set')
            self.dstridx = len(self) - 1
        elif label == 'Y':
            if self.has_year:
                raise ValueError('Year is already set')
            self.ystridx = len(self) - 1

    def _resolve_from_stridxs(self, strids):
        """
        Try to resolve the identities of year/month/day elements using
        ystridx, mstridx, and dstridx, if enough of these are specified.
        """
        if len(self) == 3 and len(strids) == 2:
            # we can back out the remaining stridx value
            missing = [x for x in range(3) if x not in strids.values()]
            key = [x for x in ['y', 'm', 'd'] if x not in strids]
            assert len(missing) == len(key) == 1
            key = key[0]
            val = missing[0]
            strids[key] = val

        assert len(self) == len(strids)  # otherwise this should not be called
        out = {key: self[strids[key]] for key in strids}
        return (out.get('y'), out.get('m'), out.get('d'))

    def resolve_ymd(self, yearfirst, dayfirst):
        len_ymd = len(self)
        year, month, day = (None, None, None)

        strids = (('y', self.ystridx),
                  ('m', self.mstridx),
                  ('d', self.dstridx))

        strids = {key: val for key, val in strids if val is not None}
        if (len(self) == len(strids) > 0 or
                (len(self) == 3 and len(strids) == 2)):
            return self._resolve_from_stridxs(strids)

        mstridx = self.mstridx

        if len_ymd > 3:
            raise ValueError("More than three YMD values")
        elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
            # One member, or two members with a month string
            if mstridx is not None:
                month = self[mstridx]
                # since mstridx is 0 or 1, self[mstridx-1] always
                # looks up the other element
                other = self[mstridx - 1]
            else:
                other = self[0]

            if len_ymd > 1 or mstridx is None:
                if other > 31:
                    year = other
                else:
                    day = other

        elif len_ymd == 2:
            # Two members with numbers
            if self[0] > 31:
                # 99-01
                year, month = self
            elif self[1] > 31:
                # 01-99
                month, year = self
            elif dayfirst and self[1] <= 12:
                # 13-01
                day, month = self
            else:
                # 01-13
                month, day = self

        elif len_ymd == 3:
            # Three members
            if mstridx == 0:
                if self[1] > 31:
                    # Apr-2003-25
                    month, year, day = self
                else:
                    month, day, year = self
            elif mstridx == 1:
                if self[0] > 31 or (yearfirst and self[2] <= 31):
                    # 99-Jan-01
                    year, month, day = self
                else:
                    # 01-Jan-01
                    # Give precedence to day-first, since
                    # two-digit years is usually hand-written.
                    day, month, year = self

            elif mstridx == 2:
                # WTF!?
                if self[1] > 31:
                    # 01-99-Jan
                    day, year, month = self
                else:
                    # 99-01-Jan
                    year, day, month = self

            else:
                if (self[0] > 31 or
                    self.ystridx == 0 or
                        (yearfirst and self[1] <= 12 and self[2] <= 31)):
                    # 99-01-01
                    if dayfirst and self[2] <= 12:
                        year, day, month = self
                    else:
                        year, month, day = self
                elif self[0] > 12 or (dayfirst and self[1] <= 12):
                    # 13-01-01
                    day, month, year = self
                else:
                    # 01-13-01
                    month, day, year = self

        return year, month, day


class parser(object):
    def __init__(self, info=None):
        self.info = info or parserinfo()

    def parse(self, timestr, default=None,
              ignoretz=False, tzinfos=None, **kwargs):
        """
        Parse the date/time string into a :class:`datetime.datetime` object.

        :param timestr:
            Any date/time string using the supported formats.

        :param default:
            The default datetime object, if this is a datetime object and not
            ``None``, elements specified in ``timestr`` replace elements in the
            default object.

        :param ignoretz:
            If set ``True``, time zones in parsed strings are ignored and a
            naive :class:`datetime.datetime` object is returned.

        :param tzinfos:
            Additional time zone names / aliases which may be present in the
            string. This argument maps time zone names (and optionally offsets
            from those time zones) to time zones. This parameter can be a
            dictionary with timezone aliases mapping time zone names to time
            zones or a function taking two parameters (``tzname`` and
            ``tzoffset``) and returning a time zone.

            The timezones to which the names are mapped can be an integer
            offset from UTC in seconds or a :class:`tzinfo` object.

            .. doctest::
               :options: +NORMALIZE_WHITESPACE

                >>> from dateutil.parser import parse
                >>> from dateutil.tz import gettz
                >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
                >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
                >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21,
                                  tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

            This parameter is ignored if ``ignoretz`` is set.

        :param \\*\\*kwargs:
            Keyword arguments as passed to ``_parse()``.

        :return:
            Returns a :class:`datetime.datetime` object or, if the
            ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
            first element being a :class:`datetime.datetime` object, the second
            a tuple containing the fuzzy tokens.

        :raises ParserError:
            Raised for invalid or unknown string format, if the provided
            :class:`tzinfo` is not in a valid format, or if an invalid date
            would be created.

        :raises TypeError:
            Raised for non-string or character stream input.

        :raises OverflowError:
            Raised if the parsed date exceeds the largest valid C integer on
            your system.
        """

        if default is None:
            default = datetime.datetime.now().replace(hour=0, minute=0,
                                                      second=0, microsecond=0)

        res, skipped_tokens = self._parse(timestr, **kwargs)

        if res is None:
            raise ParserError("Unknown string format: %s", timestr)

        if len(res) == 0:
            raise ParserError("String does not contain a date: %s", timestr)

        try:
            ret = self._build_naive(res, default)
        except ValueError as e:
            six.raise_from(ParserError(str(e) + ": %s", timestr), e)

        if not ignoretz:
            ret = self._build_tzaware(ret, res, tzinfos)

        if kwargs.get('fuzzy_with_tokens', False):
            return ret, skipped_tokens
        else:
            return ret

    class _result(_resultbase):
        __slots__ = ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond",
                     "tzname", "tzoffset", "ampm", "any_unused_tokens"]

    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
               fuzzy_with_tokens=False):
        """
        Private method which performs the heavy lifting of parsing, called from
        ``parse()``, which passes on its ``kwargs`` to this function.

        :param timestr:
            The string to parse.

        :param dayfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
            ``yearfirst`` is set to ``True``, this distinguishes between YDM
            and YMD. If set to ``None``, this value is retrieved from the
            current :class:`parserinfo` object (which itself defaults to
            ``False``).

        :param yearfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
            to be the year, otherwise the last number is taken to be the year.
            If this is set to ``None``, the value is retrieved from the current
            :class:`parserinfo` object (which itself defaults to ``False``).

        :param fuzzy:
            Whether to allow fuzzy parsing, allowing for string like "Today is
            January 1, 2047 at 8:21:00AM".

        :param fuzzy_with_tokens:
            If ``True``, ``fuzzy`` is automatically set to True, and the parser
            will return a tuple where the first element is the parsed
            :class:`datetime.datetime` datetimestamp and the second element is
            a tuple containing the portions of the string which were ignored:

            .. doctest::

                >>> from dateutil.parser import parse
                >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
                (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

        """
        if fuzzy_with_tokens:
            fuzzy = True

        info = self.info

        if dayfirst is None:
            dayfirst = info.dayfirst

        if yearfirst is None:
            yearfirst = info.yearfirst

        res = self._result()
        l = _timelex.split(timestr)         # Splits the timestr into tokens

        skipped_idxs = []

        # year/month/day list
        ymd = _ymd()

        len_l = len(l)
        i = 0
        try:
            while i < len_l:

                # Check if it's a number
                value_repr = l[i]
                try:
                    value = float(value_repr)
                except ValueError:
                    value = None

                if value is not None:
                    # Numeric token
                    i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)

                # Check weekday
                elif info.weekday(l[i]) is not None:
                    value = info.weekday(l[i])
                    res.weekday = value

                # Check month name
                elif info.month(l[i]) is not None:
                    value = info.month(l[i])
                    ymd.append(value, 'M')

                    if i + 1 < len_l:
                        if l[i + 1] in ('-', '/'):
                            # Jan-01[-99]
                            sep = l[i + 1]
                            ymd.append(l[i + 2])

                            if i + 3 < len_l and l[i + 3] == sep:
                                # Jan-01-99
                                ymd.append(l[i + 4])
                                i += 2

                            i += 2

                        elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
                              info.pertain(l[i + 2])):
                            # Jan of 01
                            # In this case, 01 is clearly year
                            if l[i + 4].isdigit():
                                # Convert it here to become unambiguous
                                value = int(l[i + 4])
                                year = str(info.convertyear(value))
                                ymd.append(year, 'Y')
                            else:
                                # Wrong guess
                                pass
                                # TODO: not hit in tests
                            i += 4

                # Check am/pm
                elif info.ampm(l[i]) is not None:
                    value = info.ampm(l[i])
                    val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)

                    if val_is_ampm:
                        res.hour = self._adjust_ampm(res.hour, value)
                        res.ampm = value

                    elif fuzzy:
                        skipped_idxs.append(i)

                # Check for a timezone name
                elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
                    res.tzname = l[i]
                    res.tzoffset = info.tzoffset(res.tzname)

                    # Check for something like GMT+3, or BRST+3. Notice
                    # that it doesn't mean "I am 3 hours after GMT", but
                    # "my time +3 is GMT". If found, we reverse the
                    # logic so that timezone parsing code will get it
                    # right.
                    if i + 1 < len_l and l[i + 1] in ('+', '-'):
                        l[i + 1] = ('+', '-')[l[i + 1] == '+']
                        res.tzoffset = None
                        if info.utczone(res.tzname):
                            # With something like GMT+3, the timezone
                            # is *not* GMT.
                            res.tzname = None

                # Check for a numbered timezone
                elif res.hour is not None and l[i] in ('+', '-'):
                    signal = (-1, 1)[l[i] == '+']
                    len_li = len(l[i + 1])

                    # TODO: check that l[i + 1] is integer?
                    if len_li == 4:
                        # -0300
                        hour_offset = int(l[i + 1][:2])
                        min_offset = int(l[i + 1][2:])
                    elif i + 2 < len_l and l[i + 2] == ':':
                        # -03:00
                        hour_offset = int(l[i + 1])
                        min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
                        i += 2
                    elif len_li <= 2:
                        # -[0]3
                        hour_offset = int(l[i + 1][:2])
                        min_offset = 0
                    else:
                        raise ValueError(timestr)

                    res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)

                    # Look for a timezone name between parenthesis
                    if (i + 5 < len_l and
                            info.jump(l[i + 2]) and l[i + 3] == '(' and
                            l[i + 5] == ')' and
                            3 <= len(l[i + 4]) and
                            self._could_be_tzname(res.hour, res.tzname,
                                                  None, l[i + 4])):
                        # -0300 (BRST)
                        res.tzname = l[i + 4]
                        i += 4

                    i += 1

                # Check jumps
                elif not (info.jump(l[i]) or fuzzy):
                    raise ValueError(timestr)

                else:
                    skipped_idxs.append(i)
                i += 1

            # Process year/month/day
            year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)

            res.century_specified = ymd.century_specified
            res.year = year
            res.month = month
            res.day = day

        except (IndexError, ValueError):
            return None, None

        if not info.validate(res):
            return None, None

        if fuzzy_with_tokens:
            skipped_tokens = self._recombine_skipped(l, skipped_idxs)
            return res, tuple(skipped_tokens)
        else:
            return res, None

    def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
        # Token is a number
        value_repr = tokens[idx]
        try:
            value = self._to_decimal(value_repr)
        except Exception as e:
            six.raise_from(ValueError('Unknown numeric token'), e)

        len_li = len(value_repr)

        len_l = len(tokens)

        if (len(ymd) == 3 and len_li in (2, 4) and
            res.hour is None and
                (idx + 1 >= len_l or
                 (tokens[idx + 1] != ':' and
                  info.hms(tokens[idx + 1]) is None))):
            # 19990101T23[59]
            s = tokens[idx]
            res.hour = int(s[:2])

            if len_li == 4:
                res.minute = int(s[2:])

        elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
            # YYMMDD or HHMMSS[.ss]
            s = tokens[idx]

            if not ymd and '.' not in tokens[idx]:
                ymd.append(s[:2])
                ymd.append(s[2:4])
                ymd.append(s[4:])
            else:
                # 19990101T235959[.59]

                # TODO: Check if res attributes already set.
                res.hour = int(s[:2])
                res.minute = int(s[2:4])
                res.second, res.microsecond = self._parsems(s[4:])

        elif len_li in (8, 12, 14):
            # YYYYMMDD
            s = tokens[idx]
            ymd.append(s[:4], 'Y')
            ymd.append(s[4:6])
            ymd.append(s[6:8])

            if len_li > 8:
                res.hour = int(s[8:10])
                res.minute = int(s[10:12])

                if len_li > 12:
                    res.second = int(s[12:])

        elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
            # HH[ ]h or MM[ ]m or SS[.ss][ ]s
            hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
            (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
            if hms is not None:
                # TODO: checking that hour/minute/second are not
                # already set?
                self._assign_hms(res, value_repr, hms)

        elif idx + 2 < len_l and tokens[idx + 1] == ':':
            # HH:MM[:SS[.ss]]
            res.hour = int(value)
            value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
            (res.minute, res.second) = self._parse_min_sec(value)

            if idx + 4 < len_l and tokens[idx + 3] == ':':
                res.second, res.microsecond = self._parsems(tokens[idx + 4])

                idx += 2

            idx += 2

        elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
            sep = tokens[idx + 1]
            ymd.append(value_repr)

            if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
                if tokens[idx + 2].isdigit():
                    # 01-01[-01]
                    ymd.append(tokens[idx + 2])
                else:
                    # 01-Jan[-01]
                    value = info.month(tokens[idx + 2])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        raise ValueError()

                if idx + 3 < len_l and tokens[idx + 3] == sep:
                    # We have three members
                    value = info.month(tokens[idx + 4])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        ymd.append(tokens[idx + 4])
                    idx += 2

                idx += 1
            idx += 1

        elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
            if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
                # 12 am
                hour = int(value)
                res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
                idx += 1
            else:
                # Year, month or day
                ymd.append(value)
            idx += 1

        elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
            # 12am
            hour = int(value)
            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
            idx += 1

        elif ymd.could_be_day(value):
            ymd.append(value)

        elif not fuzzy:
            raise ValueError()

        return idx

    def _find_hms_idx(self, idx, tokens, info, allow_jump):
        len_l = len(tokens)

        if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
            # There is an "h", "m", or "s" label following this token.  We take
            # assign the upcoming label to the current token.
            # e.g. the "12" in 12h"
            hms_idx = idx + 1

        elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
              info.hms(tokens[idx+2]) is not None):
            # There is a space and then an "h", "m", or "s" label.
            # e.g. the "12" in "12 h"
            hms_idx = idx + 2

        elif idx > 0 and info.hms(tokens[idx-1]) is not None:
            # There is a "h", "m", or "s" preceding this token. Since neither
            # of the previous cases was hit, there is no label following this
            # token, so we use the previous label.
            # e.g. the "04" in "12h04"
            hms_idx = idx-1

        elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
              info.hms(tokens[idx-2]) is not None):
            # If we are looking at the final token, we allow for a
            # backward-looking check to skip over a space.
            # TODO: Are we sure this is the right condition here?
            hms_idx = idx - 2

        else:
            hms_idx = None

        return hms_idx

    def _assign_hms(self, res, value_repr, hms):
        # See GH issue #427, fixing float rounding
        value = self._to_decimal(value_repr)

        if hms == 0:
            # Hour
            res.hour = int(value)
            if value % 1:
                res.minute = int(60*(value % 1))

        elif hms == 1:
            (res.minute, res.second) = self._parse_min_sec(value)

        elif hms == 2:
            (res.second, res.microsecond) = self._parsems(value_repr)

    def _could_be_tzname(self, hour, tzname, tzoffset, token):
        return (hour is not None and
                tzname is None and
                tzoffset is None and
                len(token) <= 5 and
                (all(x in string.ascii_uppercase for x in token)
                 or token in self.info.UTCZONE))

    def _ampm_valid(self, hour, ampm, fuzzy):
        """
        For fuzzy parsing, 'a' or 'am' (both valid English words)
        may erroneously trigger the AM/PM flag. Deal with that
        here.
        """
        val_is_ampm = True

        # If there's already an AM/PM flag, this one isn't one.
        if fuzzy and ampm is not None:
            val_is_ampm = False

        # If AM/PM is found and hour is not, raise a ValueError
        if hour is None:
            if fuzzy:
                val_is_ampm = False
            else:
                raise ValueError('No hour specified with AM or PM flag.')
        elif not 0 <= hour <= 12:
            # If AM/PM is found, it's a 12 hour clock, so raise
            # an error for invalid range
            if fuzzy:
                val_is_ampm = False
            else:
                raise ValueError('Invalid hour specified for 12-hour clock.')

        return val_is_ampm

    def _adjust_ampm(self, hour, ampm):
        if hour < 12 and ampm == 1:
            hour += 12
        elif hour == 12 and ampm == 0:
            hour = 0
        return hour

    def _parse_min_sec(self, value):
        # TODO: Every usage of this function sets res.second to the return
        # value. Are there any cases where second will be returned as None and
        # we *don't* want to set res.second = None?
        minute = int(value)
        second = None

        sec_remainder = value % 1
        if sec_remainder:
            second = int(60 * sec_remainder)
        return (minute, second)

    def _parse_hms(self, idx, tokens, info, hms_idx):
        # TODO: Is this going to admit a lot of false-positives for when we
        # just happen to have digits and "h", "m" or "s" characters in non-date
        # text? I guess hex hashes won't have that problem, but there's plenty
        # of random junk out there.
        if hms_idx is None:
            hms = None
            new_idx = idx
        elif hms_idx > idx:
            hms = info.hms(tokens[hms_idx])
            new_idx = hms_idx
        else:
            # Looking backwards, increment one.
            hms = info.hms(tokens[hms_idx]) + 1
            new_idx = idx

        return (new_idx, hms)

    # ------------------------------------------------------------------
    # Handling for individual tokens. These are kept as methods instead
    # of functions for the sake of customizability via subclassing.

    def _parsems(self, value):
        """Parse a I[.F] seconds value into (seconds, microseconds)."""
        if "." not in value:
            return int(value), 0
        else:
            i, f = value.split(".")
            return int(i), int(f.ljust(6, "0")[:6])

    def _to_decimal(self, val):
        try:
            decimal_value = Decimal(val)
            # See GH 662, edge case, infinite value should not be converted
            # via `_to_decimal`
            if not decimal_value.is_finite():
                raise ValueError("Converted decimal value is infinite or NaN")
        except Exception as e:
            msg = "Could not convert %s to decimal" % val
            six.raise_from(ValueError(msg), e)
        else:
            return decimal_value

    # ------------------------------------------------------------------
    # Post-Parsing construction of datetime output. These are kept as
    # methods instead of functions for the sake of customizability via
    # subclassing.

    def _build_tzinfo(self, tzinfos, tzname, tzoffset):
        if callable(tzinfos):
            tzdata = tzinfos(tzname, tzoffset)
        else:
            tzdata = tzinfos.get(tzname)
        # handle case where tzinfo is paased an options that returns None
        # eg tzinfos = {'BRST' : None}
        if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
            tzinfo = tzdata
        elif isinstance(tzdata, text_type):
            tzinfo = tz.tzstr(tzdata)
        elif isinstance(tzdata, integer_types):
            tzinfo = tz.tzoffset(tzname, tzdata)
        else:
            raise TypeError("Offset must be tzinfo subclass, tz string, "
                            "or int offset.")
        return tzinfo

    def _build_tzaware(self, naive, res, tzinfos):
        if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
            tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
            aware = naive.replace(tzinfo=tzinfo)
            aware = self._assign_tzname(aware, res.tzname)

        elif res.tzname and res.tzname in time.tzname:
            aware = naive.replace(tzinfo=tz.tzlocal())

            # Handle ambiguous local datetime
            aware = self._assign_tzname(aware, res.tzname)

            # This is mostly relevant for winter GMT zones parsed in the UK
            if (aware.tzname() != res.tzname and
                    res.tzname in self.info.UTCZONE):
                aware = aware.replace(tzinfo=tz.UTC)

        elif res.tzoffset == 0:
            aware = naive.replace(tzinfo=tz.UTC)

        elif res.tzoffset:
            aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

        elif not res.tzname and not res.tzoffset:
            # i.e. no timezone information was found.
            aware = naive

        elif res.tzname:
            # tz-like string was parsed but we don't know what to do
            # with it
            warnings.warn("tzname {tzname} identified but not understood. "
                          "Pass `tzinfos` argument in order to correctly "
                          "return a timezone-aware datetime. In a future "
                          "version, this will raise an "
                          "exception.".format(tzname=res.tzname),
                          category=UnknownTimezoneWarning)
            aware = naive

        return aware

    def _build_naive(self, res, default):
        repl = {}
        for attr in ("year", "month", "day", "hour",
                     "minute", "second", "microsecond"):
            value = getattr(res, attr)
            if value is not None:
                repl[attr] = value

        if 'day' not in repl:
            # If the default day exceeds the last day of the month, fall back
            # to the end of the month.
            cyear = default.year if res.year is None else res.year
            cmonth = default.month if res.month is None else res.month
            cday = default.day if res.day is None else res.day

            if cday > monthrange(cyear, cmonth)[1]:
                repl['day'] = monthrange(cyear, cmonth)[1]

        naive = default.replace(**repl)

        if res.weekday is not None and not res.day:
            naive = naive + relativedelta.relativedelta(weekday=res.weekday)

        return naive

    def _assign_tzname(self, dt, tzname):
        if dt.tzname() != tzname:
            new_dt = tz.enfold(dt, fold=1)
            if new_dt.tzname() == tzname:
                return new_dt

        return dt

    def _recombine_skipped(self, tokens, skipped_idxs):
        """
        >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
        >>> skipped_idxs = [0, 1, 2, 5]
        >>> _recombine_skipped(tokens, skipped_idxs)
        ["foo bar", "baz"]
        """
        skipped_tokens = []
        for i, idx in enumerate(sorted(skipped_idxs)):
            if i > 0 and idx - 1 == skipped_idxs[i - 1]:
                skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
            else:
                skipped_tokens.append(tokens[idx])

        return skipped_tokens


DEFAULTPARSER = parser()


def parse(timestr, parserinfo=None, **kwargs):
    """

    Parse a string in one of the supported formats, using the
    ``parserinfo`` parameters.

    :param timestr:
        A string containing a date/time stamp.

    :param parserinfo:
        A :class:`parserinfo` object containing parameters for the parser.
        If ``None``, the default arguments to the :class:`parserinfo`
        constructor are used.
```

(Diff display truncated here; per the hunk header, the added file continues to line 1613.)
The ``**kwargs`` parameter takes the following keyword arguments:
|
1285 |
+
|
1286 |
+
:param default:
|
1287 |
+
The default datetime object, if this is a datetime object and not
|
1288 |
+
``None``, elements specified in ``timestr`` replace elements in the
|
1289 |
+
default object.
|
1290 |
+
|
1291 |
+
:param ignoretz:
|
1292 |
+
If set ``True``, time zones in parsed strings are ignored and a naive
|
1293 |
+
:class:`datetime` object is returned.
|
1294 |
+
|
1295 |
+
:param tzinfos:
|
1296 |
+
Additional time zone names / aliases which may be present in the
|
1297 |
+
string. This argument maps time zone names (and optionally offsets
|
1298 |
+
from those time zones) to time zones. This parameter can be a
|
1299 |
+
dictionary with timezone aliases mapping time zone names to time
|
1300 |
+
zones or a function taking two parameters (``tzname`` and
|
1301 |
+
``tzoffset``) and returning a time zone.
|
1302 |
+
|
1303 |
+
The timezones to which the names are mapped can be an integer
|
1304 |
+
offset from UTC in seconds or a :class:`tzinfo` object.
|
1305 |
+
|
1306 |
+
.. doctest::
|
1307 |
+
:options: +NORMALIZE_WHITESPACE
|
1308 |
+
|
1309 |
+
>>> from dateutil.parser import parse
|
1310 |
+
>>> from dateutil.tz import gettz
|
1311 |
+
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
|
1312 |
+
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
|
1313 |
+
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
|
1314 |
+
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
|
1315 |
+
datetime.datetime(2012, 1, 19, 17, 21,
|
1316 |
+
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
|
1317 |
+
|
1318 |
+
This parameter is ignored if ``ignoretz`` is set.
|
1319 |
+
|
1320 |
+
:param dayfirst:
|
1321 |
+
Whether to interpret the first value in an ambiguous 3-integer date
|
1322 |
+
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
|
1323 |
+
``yearfirst`` is set to ``True``, this distinguishes between YDM and
|
1324 |
+
YMD. If set to ``None``, this value is retrieved from the current
|
1325 |
+
:class:`parserinfo` object (which itself defaults to ``False``).
|
1326 |
+
|
1327 |
+
:param yearfirst:
|
1328 |
+
Whether to interpret the first value in an ambiguous 3-integer date
|
1329 |
+
(e.g. 01/05/09) as the year. If ``True``, the first number is taken to
|
1330 |
+
be the year, otherwise the last number is taken to be the year. If
|
1331 |
+
this is set to ``None``, the value is retrieved from the current
|
1332 |
+
:class:`parserinfo` object (which itself defaults to ``False``).
|
1333 |
+
|
1334 |
+
:param fuzzy:
|
1335 |
+
Whether to allow fuzzy parsing, allowing for string like "Today is
|
1336 |
+
January 1, 2047 at 8:21:00AM".
|
1337 |
+
|
1338 |
+
:param fuzzy_with_tokens:
|
1339 |
+
If ``True``, ``fuzzy`` is automatically set to True, and the parser
|
1340 |
+
will return a tuple where the first element is the parsed
|
1341 |
+
:class:`datetime.datetime` datetimestamp and the second element is
|
1342 |
+
a tuple containing the portions of the string which were ignored:
|
1343 |
+
|
1344 |
+
.. doctest::
|
1345 |
+
|
1346 |
+
>>> from dateutil.parser import parse
|
1347 |
+
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
|
1348 |
+
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
|
1349 |
+
|
1350 |
+
:return:
|
1351 |
+
Returns a :class:`datetime.datetime` object or, if the
|
1352 |
+
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
|
1353 |
+
first element being a :class:`datetime.datetime` object, the second
|
1354 |
+
a tuple containing the fuzzy tokens.
|
1355 |
+
|
1356 |
+
:raises ParserError:
|
1357 |
+
Raised for invalid or unknown string formats, if the provided
|
1358 |
+
:class:`tzinfo` is not in a valid format, or if an invalid date would
|
1359 |
+
be created.
|
1360 |
+
|
1361 |
+
:raises OverflowError:
|
1362 |
+
Raised if the parsed date exceeds the largest valid C integer on
|
1363 |
+
your system.
|
1364 |
+
"""
|
1365 |
+
if parserinfo:
|
1366 |
+
return parser(parserinfo).parse(timestr, **kwargs)
|
1367 |
+
else:
|
1368 |
+
return DEFAULTPARSER.parse(timestr, **kwargs)
|
1369 |
+
|
1370 |
+
|
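The ``default`` keyword documented above works through ``_build_naive``: fields parsed from the string override the default, everything else is taken from it. A minimal illustrative sketch, not part of the committed file; the input string and default datetime are made up for demonstration:

    from datetime import datetime
    from dateutil.parser import parse

    # Hour and minute come from the string; year, month and day fall
    # back to the supplied default datetime.
    print(parse("14:30", default=datetime(2003, 9, 25)))
    # 2003-09-25 14:30:00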
+
+class _tzparser(object):
+
+    class _result(_resultbase):
+
+        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
+                     "start", "end"]
+
+        class _attr(_resultbase):
+            __slots__ = ["month", "week", "weekday",
+                         "yday", "jyday", "day", "time"]
+
+        def __repr__(self):
+            return self._repr("")
+
+        def __init__(self):
+            _resultbase.__init__(self)
+            self.start = self._attr()
+            self.end = self._attr()
+
+    def parse(self, tzstr):
+        res = self._result()
+        l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)', tzstr) if x]
+        used_idxs = list()
+        try:
+
+            len_l = len(l)
+
+            i = 0
+            while i < len_l:
+                # BRST+3[BRDT[+2]]
+                j = i
+                while j < len_l and not [x for x in l[j]
+                                         if x in "0123456789:,-+"]:
+                    j += 1
+                if j != i:
+                    if not res.stdabbr:
+                        offattr = "stdoffset"
+                        res.stdabbr = "".join(l[i:j])
+                    else:
+                        offattr = "dstoffset"
+                        res.dstabbr = "".join(l[i:j])
+
+                    for ii in range(j):
+                        used_idxs.append(ii)
+                    i = j
+                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+                                       "0123456789")):
+                        if l[i] in ('+', '-'):
+                            # Yes, that's right.  See the TZ variable
+                            # documentation.
+                            signal = (1, -1)[l[i] == '+']
+                            used_idxs.append(i)
+                            i += 1
+                        else:
+                            signal = -1
+                        len_li = len(l[i])
+                        if len_li == 4:
+                            # -0300
+                            setattr(res, offattr, (int(l[i][:2]) * 3600 +
+                                                   int(l[i][2:]) * 60) * signal)
+                        elif i + 1 < len_l and l[i + 1] == ':':
+                            # -03:00
+                            setattr(res, offattr,
+                                    (int(l[i]) * 3600 +
+                                     int(l[i + 2]) * 60) * signal)
+                            used_idxs.append(i)
+                            i += 2
+                        elif len_li <= 2:
+                            # -[0]3
+                            setattr(res, offattr,
+                                    int(l[i][:2]) * 3600 * signal)
+                        else:
+                            return None
+                        used_idxs.append(i)
+                        i += 1
+                    if res.dstabbr:
+                        break
+                else:
+                    break
+
+            if i < len_l:
+                for j in range(i, len_l):
+                    if l[j] == ';':
+                        l[j] = ','
+
+                assert l[i] == ','
+
+                i += 1
+
+            if i >= len_l:
+                pass
+            elif (8 <= l.count(',') <= 9 and
+                  not [y for x in l[i:] if x != ','
+                       for y in x if y not in "0123456789+-"]):
+                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
+                for x in (res.start, res.end):
+                    x.month = int(l[i])
+                    used_idxs.append(i)
+                    i += 2
+                    if l[i] == '-':
+                        value = int(l[i + 1]) * -1
+                        used_idxs.append(i)
+                        i += 1
+                    else:
+                        value = int(l[i])
+                    used_idxs.append(i)
+                    i += 2
+                    if value:
+                        x.week = value
+                        x.weekday = (int(l[i]) - 1) % 7
+                    else:
+                        x.day = int(l[i])
+                    used_idxs.append(i)
+                    i += 2
+                    x.time = int(l[i])
+                    used_idxs.append(i)
+                    i += 2
+                if i < len_l:
+                    if l[i] in ('-', '+'):
+                        signal = (-1, 1)[l[i] == "+"]
+                        used_idxs.append(i)
+                        i += 1
+                    else:
+                        signal = 1
+                    used_idxs.append(i)
+                    res.dstoffset = (res.stdoffset + int(l[i]) * signal)
+
+                # This was a made-up format that is not in normal use
+                warn(('Parsed time zone "%s"' % tzstr) +
+                     ' is in a non-standard dateutil-specific format, which '
+                     'is now deprecated; support for parsing this format '
+                     'will be removed in future versions. It is recommended '
+                     'that you switch to a standard format like the GNU '
+                     'TZ variable format.', tz.DeprecatedTzFormatWarning)
+            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
+                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
+                                                     '.', '-', ':')
+                       for y in x if y not in "0123456789"]):
+                for x in (res.start, res.end):
+                    if l[i] == 'J':
+                        # non-leap year day (1 based)
+                        used_idxs.append(i)
+                        i += 1
+                        x.jyday = int(l[i])
+                    elif l[i] == 'M':
+                        # month[-.]week[-.]weekday
+                        used_idxs.append(i)
+                        i += 1
+                        x.month = int(l[i])
+                        used_idxs.append(i)
+                        i += 1
+                        assert l[i] in ('-', '.')
+                        used_idxs.append(i)
+                        i += 1
+                        x.week = int(l[i])
+                        if x.week == 5:
+                            x.week = -1
+                        used_idxs.append(i)
+                        i += 1
+                        assert l[i] in ('-', '.')
+                        used_idxs.append(i)
+                        i += 1
+                        x.weekday = (int(l[i]) - 1) % 7
+                    else:
+                        # year day (zero based)
+                        x.yday = int(l[i]) + 1
+
+                    used_idxs.append(i)
+                    i += 1
+
+                    if i < len_l and l[i] == '/':
+                        used_idxs.append(i)
+                        i += 1
+                        # start time
+                        len_li = len(l[i])
+                        if len_li == 4:
+                            # -0300
+                            x.time = (int(l[i][:2]) * 3600 +
+                                      int(l[i][2:]) * 60)
+                        elif i + 1 < len_l and l[i + 1] == ':':
+                            # -03:00
+                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
+                            used_idxs.append(i)
+                            i += 2
+                            if i + 1 < len_l and l[i + 1] == ':':
+                                used_idxs.append(i)
+                                i += 2
+                                x.time += int(l[i])
+                        elif len_li <= 2:
+                            # -[0]3
+                            x.time = (int(l[i][:2]) * 3600)
+                        else:
+                            return None
+                        used_idxs.append(i)
+                        i += 1
+
+                    assert i == len_l or l[i] == ','
+
+                    i += 1
+
+                assert i >= len_l
+
+        except (IndexError, ValueError, AssertionError):
+            return None
+
+        unused_idxs = set(range(len_l)).difference(used_idxs)
+        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",", ":"})
+        return res
+
+
+DEFAULTTZPARSER = _tzparser()
+
+
+def _parsetz(tzstr):
+    return DEFAULTTZPARSER.parse(tzstr)
+
+
+class ParserError(ValueError):
+    """Exception subclass used for any failure to parse a datetime string.
+
+    This is a subclass of :py:exc:`ValueError`, and should be raised any time
+    earlier versions of ``dateutil`` would have raised ``ValueError``.
+
+    .. versionadded:: 2.8.1
+    """
+    def __str__(self):
+        try:
+            return self.args[0] % self.args[1:]
+        except (TypeError, IndexError):
+            return super(ParserError, self).__str__()
+
+    def __repr__(self):
+        args = ", ".join("'%s'" % arg for arg in self.args)
+        return "%s(%s)" % (self.__class__.__name__, args)
+
+
+class UnknownTimezoneWarning(RuntimeWarning):
+    """Raised when the parser finds a timezone it cannot parse into a tzinfo.
+
+    .. versionadded:: 2.7.0
+    """
+# vim:ts=4:sw=4:et
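Because ``ParserError`` subclasses ``ValueError`` (see the class above), pre-2.8.1 error handling keeps working unchanged. A minimal sketch, not part of the committed file; the garbage input is made up:

    from dateutil.parser import parse, ParserError

    try:
        parse("not a date")
    except ParserError as exc:   # would also be caught as ValueError
        print(repr(exc))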
env-llmeval/lib/python3.10/site-packages/dateutil/parser/isoparser.py
ADDED
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a parser for ISO-8601 strings.
+
+It is intended to support all valid date, time and datetime formats per the
+ISO-8601 specification.
+
+.. versionadded:: 2.7.0
+"""
+from datetime import datetime, timedelta, time, date
+import calendar
+from dateutil import tz
+
+from functools import wraps
+
+import re
+import six
+
+__all__ = ["isoparse", "isoparser"]
+
+
+def _takes_ascii(f):
+    @wraps(f)
+    def func(self, str_in, *args, **kwargs):
+        # If it's a stream, read the whole thing
+        str_in = getattr(str_in, 'read', lambda: str_in)()
+
+        # If it's unicode, turn it into bytes, since ISO-8601 only covers
+        # ASCII
+        if isinstance(str_in, six.text_type):
+            # ASCII is the same in UTF-8
+            try:
+                str_in = str_in.encode('ascii')
+            except UnicodeEncodeError as e:
+                msg = 'ISO-8601 strings should contain only ASCII characters'
+                six.raise_from(ValueError(msg), e)
+
+        return f(self, str_in, *args, **kwargs)
+
+    return func
+
+
+class isoparser(object):
+    def __init__(self, sep=None):
+        """
+        :param sep:
+            A single character that separates date and time portions. If
+            ``None``, the parser will accept any single character.
+            For strict ISO-8601 adherence, pass ``'T'``.
+        """
+        if sep is not None:
+            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
+                raise ValueError('Separator must be a single, non-numeric ' +
+                                 'ASCII character')
+
+            sep = sep.encode('ascii')
+
+        self._sep = sep
+
+    @_takes_ascii
+    def isoparse(self, dt_str):
+        """
+        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
+
+        An ISO-8601 datetime string consists of a date portion, followed
+        optionally by a time portion - the date and time portions are
+        separated by a single character separator, which is ``T`` in the
+        official standard. Incomplete date formats (such as ``YYYY-MM``)
+        may *not* be combined with a time portion.
+
+        Supported date formats are:
+
+        Common:
+
+        - ``YYYY``
+        - ``YYYY-MM``
+        - ``YYYY-MM-DD`` or ``YYYYMMDD``
+
+        Uncommon:
+
+        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
+        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
+
+        The ISO week and day numbering follows the same logic as
+        :func:`datetime.date.isocalendar`.
+
+        Supported time formats are:
+
+        - ``hh``
+        - ``hh:mm`` or ``hhmm``
+        - ``hh:mm:ss`` or ``hhmmss``
+        - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
+
+        Midnight is a special case for ``hh``, as the standard supports both
+        00:00 and 24:00 as a representation. The decimal separator can be
+        either a dot or a comma.
+
+        .. caution::
+
+            Support for fractional components other than seconds is part of
+            the ISO-8601 standard, but is not currently implemented in this
+            parser.
+
+        Supported time zone offset formats are:
+
+        - ``Z`` (UTC)
+        - ``±HH:MM``
+        - ``±HHMM``
+        - ``±HH``
+
+        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
+        with the exception of UTC, which will be represented as
+        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
+        as ``+00:00``) will also be represented as :class:`dateutil.tz.tzutc`.
+
+        :param dt_str:
+            A string or stream containing only an ISO-8601 datetime string
+
+        :return:
+            Returns a :class:`datetime.datetime` representing the string.
+            Unspecified components default to their lowest value.
+
+        .. warning::
+
+            As of version 2.7.0, the strictness of the parser should not be
+            considered a stable part of the contract. Any valid ISO-8601
+            string that parses correctly with the default settings will
+            continue to parse correctly in future versions, but invalid
+            strings that currently fail (e.g. ``2017-01-01T00:00+00:00:00``)
+            are not guaranteed to continue failing in future versions if
+            they encode a valid date.
+
+        .. versionadded:: 2.7.0
+        """
+        components, pos = self._parse_isodate(dt_str)
+
+        if len(dt_str) > pos:
+            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
+                components += self._parse_isotime(dt_str[pos + 1:])
+            else:
+                raise ValueError('String contains unknown ISO components')
+
+        if len(components) > 3 and components[3] == 24:
+            components[3] = 0
+            return datetime(*components) + timedelta(days=1)
+
+        return datetime(*components)
+
+    @_takes_ascii
+    def parse_isodate(self, datestr):
+        """
+        Parse the date portion of an ISO string.
+
+        :param datestr:
+            The string portion of an ISO string, without a separator
+
+        :return:
+            Returns a :class:`datetime.date` object
+        """
+        components, pos = self._parse_isodate(datestr)
+        if pos < len(datestr):
+            raise ValueError('String contains unknown ISO '
+                             'components: {!r}'.format(datestr.decode('ascii')))
+        return date(*components)
+
+    @_takes_ascii
+    def parse_isotime(self, timestr):
+        """
+        Parse the time portion of an ISO string.
+
+        :param timestr:
+            The time portion of an ISO string, without a separator
+
+        :return:
+            Returns a :class:`datetime.time` object
+        """
+        components = self._parse_isotime(timestr)
+        if components[0] == 24:
+            components[0] = 0
+        return time(*components)
+
+    @_takes_ascii
+    def parse_tzstr(self, tzstr, zero_as_utc=True):
+        """
+        Parse a valid ISO time zone string.
+
+        See :func:`isoparser.isoparse` for details on supported formats.
+
+        :param tzstr:
+            A string representing an ISO time zone offset
+
+        :param zero_as_utc:
+            Whether to return :class:`dateutil.tz.tzutc` for zero-offset
+            zones
+
+        :return:
+            Returns :class:`dateutil.tz.tzoffset` for offsets and
+            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
+            specified) offsets equivalent to UTC.
+        """
+        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
+
+    # Constants
+    _DATE_SEP = b'-'
+    _TIME_SEP = b':'
+    _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
+
+    def _parse_isodate(self, dt_str):
+        try:
+            return self._parse_isodate_common(dt_str)
+        except ValueError:
+            return self._parse_isodate_uncommon(dt_str)
+
+    def _parse_isodate_common(self, dt_str):
+        len_str = len(dt_str)
+        components = [1, 1, 1]
+
+        if len_str < 4:
+            raise ValueError('ISO string too short')
+
+        # Year
+        components[0] = int(dt_str[0:4])
+        pos = 4
+        if pos >= len_str:
+            return components, pos
+
+        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
+        if has_sep:
+            pos += 1
+
+        # Month
+        if len_str - pos < 2:
+            raise ValueError('Invalid common month')
+
+        components[1] = int(dt_str[pos:pos + 2])
+        pos += 2
+
+        if pos >= len_str:
+            if has_sep:
+                return components, pos
+            else:
+                raise ValueError('Invalid ISO format')
+
+        if has_sep:
+            if dt_str[pos:pos + 1] != self._DATE_SEP:
+                raise ValueError('Invalid separator in ISO string')
+            pos += 1
+
+        # Day
+        if len_str - pos < 2:
+            raise ValueError('Invalid common day')
+        components[2] = int(dt_str[pos:pos + 2])
+        return components, pos + 2
+
+    def _parse_isodate_uncommon(self, dt_str):
+        if len(dt_str) < 4:
+            raise ValueError('ISO string too short')
+
+        # All ISO formats start with the year
+        year = int(dt_str[0:4])
+
+        has_sep = dt_str[4:5] == self._DATE_SEP
+
+        pos = 4 + has_sep  # Skip '-' if it's there
+        if dt_str[pos:pos + 1] == b'W':
+            # YYYY-?Www-?D?
+            pos += 1
+            weekno = int(dt_str[pos:pos + 2])
+            pos += 2
+
+            dayno = 1
+            if len(dt_str) > pos:
+                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
+                    raise ValueError('Inconsistent use of dash separator')
+
+                pos += has_sep
+
+                dayno = int(dt_str[pos:pos + 1])
+                pos += 1
+
+            base_date = self._calculate_weekdate(year, weekno, dayno)
+        else:
+            # YYYYDDD or YYYY-DDD
+            if len(dt_str) - pos < 3:
+                raise ValueError('Invalid ordinal day')
+
+            ordinal_day = int(dt_str[pos:pos + 3])
+            pos += 3
+
+            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
+                raise ValueError('Invalid ordinal day'
+                                 ' {} for year {}'.format(ordinal_day, year))
+
+            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
+
+        components = [base_date.year, base_date.month, base_date.day]
+        return components, pos
+
+    def _calculate_weekdate(self, year, week, day):
+        """
+        Calculate the day corresponding to the ISO year-week-day calendar.
+
+        This function is effectively the inverse of
+        :func:`datetime.date.isocalendar`.
+
+        :param year:
+            The year in the ISO calendar
+
+        :param week:
+            The week in the ISO calendar - range is [1, 53]
+
+        :param day:
+            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
+
+        :return:
+            Returns a :class:`datetime.date`
+        """
+        if not 0 < week < 54:
+            raise ValueError('Invalid week: {}'.format(week))
+
+        if not 0 < day < 8:     # Range is 1-7
+            raise ValueError('Invalid weekday: {}'.format(day))
+
+        # Get week 1 for the specific year:
+        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
+        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
+
+        # Now add the specific number of weeks and days to get what we want
+        week_offset = (week - 1) * 7 + (day - 1)
+        return week_1 + timedelta(days=week_offset)
+
+    def _parse_isotime(self, timestr):
+        len_str = len(timestr)
+        components = [0, 0, 0, 0, None]
+        pos = 0
+        comp = -1
+
+        if len_str < 2:
+            raise ValueError('ISO time too short')
+
+        has_sep = False
+
+        while pos < len_str and comp < 5:
+            comp += 1
+
+            if timestr[pos:pos + 1] in b'-+Zz':
+                # Detect time zone boundary
+                components[-1] = self._parse_tzstr(timestr[pos:])
+                pos = len_str
+                break
+
+            if comp == 1 and timestr[pos:pos + 1] == self._TIME_SEP:
+                has_sep = True
+                pos += 1
+            elif comp == 2 and has_sep:
+                if timestr[pos:pos + 1] != self._TIME_SEP:
+                    raise ValueError('Inconsistent use of colon separator')
+                pos += 1
+
+            if comp < 3:
+                # Hour, minute, second
+                components[comp] = int(timestr[pos:pos + 2])
+                pos += 2
+
+            if comp == 3:
+                # Fraction of a second
+                frac = self._FRACTION_REGEX.match(timestr[pos:])
+                if not frac:
+                    continue
+
+                us_str = frac.group(1)[:6]  # Truncate to microseconds
+                components[comp] = int(us_str) * 10**(6 - len(us_str))
+                pos += len(frac.group())
+
+        if pos < len_str:
+            raise ValueError('Unused components in ISO string')
+
+        if components[0] == 24:
+            # Standard supports 00:00 and 24:00 as representations of
+            # midnight
+            if any(component != 0 for component in components[1:4]):
+                raise ValueError('Hour may only be 24 at 24:00:00.000')
+
+        return components
+
+    def _parse_tzstr(self, tzstr, zero_as_utc=True):
+        if tzstr == b'Z' or tzstr == b'z':
+            return tz.UTC
+
+        if len(tzstr) not in {3, 5, 6}:
+            raise ValueError('Time zone offset must be 1, 3, 5 or '
+                             '6 characters')
+
+        if tzstr[0:1] == b'-':
+            mult = -1
+        elif tzstr[0:1] == b'+':
+            mult = 1
+        else:
+            raise ValueError('Time zone offset requires sign')
+
+        hours = int(tzstr[1:3])
+        if len(tzstr) == 3:
+            minutes = 0
+        else:
+            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
+
+        if zero_as_utc and hours == 0 and minutes == 0:
+            return tz.UTC
+        else:
+            if minutes > 59:
+                raise ValueError('Invalid minutes in time zone offset')
+
+            if hours > 23:
+                raise ValueError('Invalid hours in time zone offset')
+
+            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
+
+
+DEFAULT_ISOPARSER = isoparser()
+isoparse = DEFAULT_ISOPARSER.isoparse
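A quick sketch of the behaviors documented in the isoparse docstring above. This is illustrative only, not part of the committed file; the example dates are made up:

    from dateutil.parser import isoparse

    isoparse("2018-04-29T17:45:25Z")   # tz-aware, tzinfo=tzutc()
    isoparse("2018-W17")               # ISO week date; day defaults to
                                       # Monday -> 2018-04-23
    isoparse("2018-04-29T24:00:00")    # 24:00 rolls over to 2018-04-30 00:00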
env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__init__.py
ADDED
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+import warnings
+import json
+
+from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+
+from dateutil.tz import tzfile as _tzfile
+
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
+
+
+class tzfile(_tzfile):
+    def __reduce__(self):
+        return (gettz, (self._filename,))
+
+
+def getzoneinfofile_stream():
+    try:
+        return BytesIO(get_data(__name__, ZONEFILENAME))
+    except IOError as e:  # TODO: switch to FileNotFoundError?
+        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+        return None
+
+
+class ZoneInfoFile(object):
+    def __init__(self, zonefile_stream=None):
+        if zonefile_stream is not None:
+            with TarFile.open(fileobj=zonefile_stream) as tf:
+                self.zones = {zf.name: tzfile(tf.extractfile(zf),
+                                              filename=zf.name)
+                              for zf in tf.getmembers()
+                              if zf.isfile() and zf.name != METADATA_FN}
+                # Deal with links: they'll point to their parent object.
+                # Less waste of memory.
+                links = {zl.name: self.zones[zl.linkname]
+                         for zl in tf.getmembers() if
+                         zl.islnk() or zl.issym()}
+                self.zones.update(links)
+                try:
+                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+                    metadata_str = metadata_json.read().decode('UTF-8')
+                    self.metadata = json.loads(metadata_str)
+                except KeyError:
+                    # no metadata in tar file
+                    self.metadata = None
+        else:
+            self.zones = {}
+            self.metadata = None
+
+    def get(self, name, default=None):
+        """
+        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience
+        method for retrieving zones from the zone dictionary.
+
+        :param name:
+            The name of the zone to retrieve. (Generally IANA zone names)
+
+        :param default:
+            The value to return in the event of a missing key.
+
+        .. versionadded:: 2.6.0
+
+        """
+        return self.zones.get(name, default)
+
+
+# The current API has gettz as a module function, although in fact it taps
+# into a stateful class. So as a workaround for now, without changing the
+# API, we will create a new "global" class instance the first time a user
+# requests a timezone. Ugly, but adheres to the API.
+#
+# TODO: Remove after deprecation period.
+_CLASS_ZONE_INSTANCE = []
+
+
+def get_zonefile_instance(new_instance=False):
+    """
+    This is a convenience function which provides a :class:`ZoneInfoFile`
+    instance using the data provided by the ``dateutil`` package. By
+    default, it caches a single instance of the ZoneInfoFile object and
+    returns that.
+
+    :param new_instance:
+        If ``True``, a new instance of :class:`ZoneInfoFile` is
+        instantiated and used as the cached instance for the next call.
+        Otherwise, new instances are created only as necessary.
+
+    :return:
+        Returns a :class:`ZoneInfoFile` object.
+
+    .. versionadded:: 2.6
+    """
+    if new_instance:
+        zif = None
+    else:
+        zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+    if zif is None:
+        zif = ZoneInfoFile(getzoneinfofile_stream())
+
+        get_zonefile_instance._cached_instance = zif
+
+    return zif
+
+
+def gettz(name):
+    """
+    This retrieves a time zone from the local zoneinfo tarball that is
+    packaged with dateutil.
+
+    :param name:
+        An IANA-style time zone name, as found in the zoneinfo file.
+
+    :return:
+        Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+    .. warning::
+        It is generally inadvisable to use this function, and it is only
+        provided for API compatibility with earlier versions. This is *not*
+        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+        time zone based on the inputs, favoring system zoneinfo. This is
+        ONLY for accessing the dateutil-specific zoneinfo (which may be out
+        of date compared to the system zoneinfo).
+
+    .. deprecated:: 2.6
+        If you need to use a specific zoneinfo file over the system
+        zoneinfo, instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile`
+        object and call :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)`
+        instead.
+
+        Use :func:`get_zonefile_instance` to retrieve an instance of the
+        dateutil-provided zoneinfo.
+    """
+    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
+                  "to use the dateutil-provided zoneinfo files, instantiate "
+                  "a ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+                  "instead. See the documentation for details.",
+                  DeprecationWarning)
+
+    if len(_CLASS_ZONE_INSTANCE) == 0:
+        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+    """Get the zonefile metadata.
+
+    See `zonefile_metadata`_
+
+    :returns:
+        A dictionary with the database metadata
+
+    .. deprecated:: 2.6
+        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
+    """
+    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+                  "versions, to use the dateutil-provided zoneinfo files, "
+                  "instantiate a ZoneInfoFile object and query the "
+                  "'metadata' attribute instead. See the documentation for "
+                  "details.", DeprecationWarning)
+
+    if len(_CLASS_ZONE_INSTANCE) == 0:
+        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+    return _CLASS_ZONE_INSTANCE[0].metadata
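Typical use of the cached accessor defined above; a minimal sketch, not part of the committed file. The "tzversion" metadata key is an assumption about the bundled METADATA file's contents:

    from dateutil.zoneinfo import get_zonefile_instance

    zif = get_zonefile_instance()
    nyc = zif.get("America/New_York")     # a tzfile, or None if unknown
    if zif.metadata is not None:
        print(zif.metadata.get("tzversion"))  # assumed key, see lead-in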
env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.76 kB)

env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc
ADDED
Binary file (2.69 kB)
env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/rebuild.py
ADDED
@@ -0,0 +1,75 @@
+import logging
+import os
+import tempfile
+import shutil
+import json
+from subprocess import check_call, check_output
+from tarfile import TarFile
+
+from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
+
+
+def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
+    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
+
+    filename is the timezone tarball from ``ftp.iana.org/tz``.
+
+    """
+    tmpdir = tempfile.mkdtemp()
+    zonedir = os.path.join(tmpdir, "zoneinfo")
+    moduledir = os.path.dirname(__file__)
+    try:
+        with TarFile.open(filename) as tf:
+            for name in zonegroups:
+                tf.extract(name, tmpdir)
+            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
+
+            _run_zic(zonedir, filepaths)
+
+        # write metadata file
+        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
+            json.dump(metadata, f, indent=4, sort_keys=True)
+        target = os.path.join(moduledir, ZONEFILENAME)
+        with TarFile.open(target, "w:%s" % format) as tf:
+            for entry in os.listdir(zonedir):
+                entrypath = os.path.join(zonedir, entry)
+                tf.add(entrypath, entry)
+    finally:
+        shutil.rmtree(tmpdir)
+
+
+def _run_zic(zonedir, filepaths):
+    """Calls the ``zic`` compiler in a compatible way to get a "fat" binary.
+
+    Recent versions of ``zic`` default to ``-b slim``, while older versions
+    don't even have the ``-b`` option (but default to "fat" binaries). The
+    current version of dateutil does not support Version 2+ TZif files,
+    which causes problems when used in conjunction with "slim" binaries, so
+    this function is used to ensure that we always get a "fat" binary.
+    """
+
+    try:
+        help_text = check_output(["zic", "--help"])
+    except OSError as e:
+        _print_on_nosuchfile(e)
+        raise
+
+    if b"-b " in help_text:
+        bloat_args = ["-b", "fat"]
+    else:
+        bloat_args = []
+
+    check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
+
+
+def _print_on_nosuchfile(e):
+    """Print helpful troubleshooting message
+
+    e is an exception raised by subprocess.check_call()
+
+    """
+    if e.errno == 2:
+        logging.error(
+            "Could not find zic. Perhaps you need to install "
+            "libc-bin or some other package that provides it, "
+            "or it's not in your PATH?")
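Invoking the rebuild helper above looks roughly like the following. This is a hedged sketch, not part of the committed file: "tzdata-latest.tar.gz" is a placeholder for an IANA tzdata release, the zonegroup names are source files conventionally found inside such tarballs, and a working ``zic`` must be on PATH (otherwise the error message above is logged):

    from dateutil.zoneinfo.rebuild import rebuild

    rebuild("tzdata-latest.tar.gz",                 # placeholder path
            zonegroups=["europe", "northamerica"],  # files inside the tarball
            metadata={"tzversion": "unknown"})      # written to METADATA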
env-llmeval/lib/python3.10/site-packages/fsspec/__init__.py
ADDED
@@ -0,0 +1,69 @@
+from importlib.metadata import entry_points
+
+from . import _version, caching
+from .callbacks import Callback
+from .compression import available_compressions
+from .core import get_fs_token_paths, open, open_files, open_local
+from .exceptions import FSTimeoutError
+from .mapping import FSMap, get_mapper
+from .registry import (
+    available_protocols,
+    filesystem,
+    get_filesystem_class,
+    register_implementation,
+    registry,
+)
+from .spec import AbstractFileSystem
+
+__version__ = _version.get_versions()["version"]
+
+__all__ = [
+    "AbstractFileSystem",
+    "FSTimeoutError",
+    "FSMap",
+    "filesystem",
+    "register_implementation",
+    "get_filesystem_class",
+    "get_fs_token_paths",
+    "get_mapper",
+    "open",
+    "open_files",
+    "open_local",
+    "registry",
+    "caching",
+    "Callback",
+    "available_protocols",
+    "available_compressions",
+]
+
+
+def process_entries():
+    if entry_points is not None:
+        try:
+            eps = entry_points()
+        except TypeError:
+            pass  # importlib-metadata < 0.8
+        else:
+            if hasattr(eps, "select"):  # Python 3.10+ / importlib_metadata >= 3.9.0
+                specs = eps.select(group="fsspec.specs")
+            else:
+                specs = eps.get("fsspec.specs", [])
+            registered_names = {}
+            for spec in specs:
+                err_msg = f"Unable to load filesystem from {spec}"
+                name = spec.name
+                if name in registered_names:
+                    continue
+                registered_names[name] = True
+                register_implementation(
+                    name,
+                    spec.value.replace(":", "."),
+                    errtxt=err_msg,
+                    # We take our implementations as the ones to overload with
+                    # if for some reason we encounter some, may be the same,
+                    # already registered
+                    clobber=True,
+                )
+
+
+process_entries()
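The registry imported above is what makes ``fsspec.filesystem(protocol)`` and ``fsspec.open()`` work. A minimal usage sketch, not part of the committed file; "example.txt" is a made-up local path. The "file" protocol ships with fsspec itself, so no plugin entry point is needed:

    import fsspec

    with fsspec.open("example.txt", "w") as f:  # resolved via the registry
        f.write("hello")

    fs = fsspec.filesystem("file")
    print(fs.cat_file("example.txt"))           # b'hello'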
env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc
ADDED
Binary file (498 Bytes)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc
ADDED
Binary file (2.98 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc
ADDED
Binary file (10.9 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc
ADDED
Binary file (5.09 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc
ADDED
Binary file (3.83 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc
ADDED
Binary file (21.2 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc
ADDED
Binary file (3.41 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc
ADDED
Binary file (735 Bytes)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc
ADDED
Binary file (10.1 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc
ADDED
Binary file (12.5 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc
ADDED
Binary file (14.6 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc
ADDED
Binary file (8.86 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc
ADDED
Binary file (11.2 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc
ADDED
Binary file (58.4 kB)

env-llmeval/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (19.9 kB)
env-llmeval/lib/python3.10/site-packages/fsspec/_version.py
ADDED
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.29) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+
+version_json = '''
+{
+ "date": "2024-02-04T20:21:42-0500",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "5dc364e13b63609717d77b7361e80cfa64e3b8fd",
+ "version": "2024.2.0"
+}
+'''  # END VERSION_JSON
+
+
+def get_versions():
+    return json.loads(version_json)
env-llmeval/lib/python3.10/site-packages/fsspec/archive.py
ADDED
@@ -0,0 +1,73 @@
+from fsspec import AbstractFileSystem
+from fsspec.utils import tokenize
+
+
+class AbstractArchiveFileSystem(AbstractFileSystem):
+    """
+    A generic superclass for implementing Archive-based filesystems.
+
+    Currently, it is shared amongst
+    :class:`~fsspec.implementations.zip.ZipFileSystem`,
+    :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
+    :class:`~fsspec.implementations.tar.TarFileSystem`.
+    """
+
+    def __str__(self):
+        return f"<Archive-like object {type(self).__name__} at {id(self)}>"
+
+    __repr__ = __str__
+
+    def ukey(self, path):
+        return tokenize(path, self.fo, self.protocol)
+
+    def _all_dirnames(self, paths):
+        """Returns *all* directory names for each path in paths, including
+        intermediate ones.
+
+        Parameters
+        ----------
+        paths: Iterable of path strings
+        """
+        if len(paths) == 0:
+            return set()
+
+        dirnames = {self._parent(path) for path in paths} - {self.root_marker}
+        return dirnames | self._all_dirnames(dirnames)
+
+    def info(self, path, **kwargs):
+        self._get_dirs()
+        path = self._strip_protocol(path)
+        if path in {"", "/"} and self.dir_cache:
+            return {"name": "", "type": "directory", "size": 0}
+        if path in self.dir_cache:
+            return self.dir_cache[path]
+        elif path + "/" in self.dir_cache:
+            return self.dir_cache[path + "/"]
+        else:
+            raise FileNotFoundError(path)
+
+    def ls(self, path, detail=True, **kwargs):
+        self._get_dirs()
+        paths = {}
+        for p, f in self.dir_cache.items():
+            p = p.rstrip("/")
+            if "/" in p:
+                root = p.rsplit("/", 1)[0]
+            else:
+                root = ""
+            if root == path.rstrip("/"):
+                paths[p] = f
+            elif all(
+                (a == b)
+                for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
+            ):
+                # root directory entry
+                ppath = p.rstrip("/").split("/", 1)[0]
+                if ppath not in paths:
+                    out = {"name": ppath, "size": 0, "type": "directory"}
+                    paths[ppath] = out
+        if detail:
+            out = sorted(paths.values(), key=lambda _: _["name"])
+            return out
+        else:
+            return sorted(paths)
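The recursion in ``_all_dirnames`` above is compact; here is a standalone restatement of the same idea for illustration (my own sketch, not fsspec API), using "/"-separated parents and an empty-string root marker:

    def all_dirnames(paths, root_marker=""):
        # Parents of every path, minus the root; then recurse on the
        # parents themselves to pick up intermediate directories.
        if not paths:
            return set()
        parents = {p.rsplit("/", 1)[0] if "/" in p else root_marker
                   for p in paths} - {root_marker}
        return parents | all_dirnames(parents, root_marker)

    print(all_dirnames(["a/b/c.txt", "a/d.txt"]))  # {'a', 'a/b'}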
env-llmeval/lib/python3.10/site-packages/fsspec/caching.py
ADDED
@@ -0,0 +1,875 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

import collections
import functools
import logging
import math
import os
import threading
import warnings
from concurrent.futures import Future, ThreadPoolExecutor
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Generic,
    NamedTuple,
    OrderedDict,
    TypeVar,
)

if TYPE_CHECKING:
    import mmap

    from typing_extensions import ParamSpec

    P = ParamSpec("P")
else:
    P = TypeVar("P")

T = TypeVar("T")


logger = logging.getLogger("fsspec")

Fetcher = Callable[[int, int], bytes]  # Maps (start, end) to bytes


class BaseCache:
    """Pass-through cache: doesn't keep anything, calls every time

    Acts as base class for other cachers

    Parameters
    ----------
    blocksize: int
        How far to read ahead in numbers of bytes
    fetcher: func
        Function of the form f(start, end) which gets bytes from remote as
        specified
    size: int
        How big this file is
    """

    name: ClassVar[str] = "none"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        self.blocksize = blocksize
        self.fetcher = fetcher
        self.size = size

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        if start is None:
            start = 0
        if stop is None:
            stop = self.size
        if start >= self.size or start >= stop:
            return b""
        return self.fetcher(start, stop)


class MMapCache(BaseCache):
    """memory-mapped sparse file cache

    Opens temporary file, which is filled blocks-wise when data is requested.
    Ensure there is enough disc space in the temporary location.

    This cache method might only work on posix
    """

    name = "mmap"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        location: str | None = None,
        blocks: set[int] | None = None,
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.blocks = set() if blocks is None else blocks
        self.location = location
        self.cache = self._makefile()

    def _makefile(self) -> mmap.mmap | bytearray:
        import mmap
        import tempfile

        if self.size == 0:
            return bytearray()

        # posix version
        if self.location is None or not os.path.exists(self.location):
            if self.location is None:
                fd = tempfile.TemporaryFile()
                self.blocks = set()
            else:
                fd = open(self.location, "wb+")
            fd.seek(self.size - 1)
            fd.write(b"1")
            fd.flush()
        else:
            fd = open(self.location, "r+b")

        return mmap.mmap(fd.fileno(), self.size)

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        logger.debug(f"MMap cache fetching {start}-{end}")
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        start_block = start // self.blocksize
        end_block = end // self.blocksize
        need = [i for i in range(start_block, end_block + 1) if i not in self.blocks]
        while need:
            # TODO: not a for loop so we can consolidate blocks later to
            # make fewer fetch calls; this could be parallel
            i = need.pop(0)
            sstart = i * self.blocksize
            send = min(sstart + self.blocksize, self.size)
            logger.debug(f"MMap get block #{i} ({sstart}-{send})")
            self.cache[sstart:send] = self.fetcher(sstart, send)
            self.blocks.add(i)

        return self.cache[start:end]

    def __getstate__(self) -> dict[str, Any]:
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        del state["cache"]
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        # Restore instance attributes
        self.__dict__.update(state)
        self.cache = self._makefile()


class ReadAheadCache(BaseCache):
    """Cache which reads only when we get beyond a block of data

    This is a much simpler version of BytesCache, and does not attempt to
    fill holes in the cache or keep fragments alive. It is best suited to
    many small reads in a sequential order (e.g., reading lines from a file).
    """

    name = "readahead"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        super().__init__(blocksize, fetcher, size)
        self.cache = b""
        self.start = 0
        self.end = 0

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        if start is None:
            start = 0
        if end is None or end > self.size:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        l = end - start
        if start >= self.start and end <= self.end:
            # cache hit
            return self.cache[start - self.start : end - self.start]
        elif self.start <= start < self.end:
            # partial hit
            part = self.cache[start - self.start :]
            l -= len(part)
            start = self.end
        else:
            # miss
            part = b""
        end = min(self.size, end + self.blocksize)
        self.cache = self.fetcher(start, end)  # new block replaces old
        self.start = start
        self.end = self.start + len(self.cache)
        return part + self.cache[:l]


class FirstChunkCache(BaseCache):
    """Caches the first block of a file only

    This may be useful for file types where the metadata is stored in the header,
    but is randomly accessed.
    """

    name = "first"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        super().__init__(blocksize, fetcher, size)
        self.cache: bytes | None = None

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        start = start or 0
        end = end or self.size
        if start < self.blocksize:
            if self.cache is None:
                if end > self.blocksize:
                    data = self.fetcher(0, end)
                    self.cache = data[: self.blocksize]
                    return data[start:]
                self.cache = self.fetcher(0, self.blocksize)
            part = self.cache[start:end]
            if end > self.blocksize:
                part += self.fetcher(self.blocksize, end)
            return part
        else:
            return self.fetcher(start, end)


class BlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name = "blockcache"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)

    def __repr__(self) -> str:
        return (
            f"<BlockCache blocksize={self.blocksize}, "
            f"size={self.size}, nblocks={self.nblocks}>"
        )

    def cache_info(self):
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        state = self.__dict__
        del state["_fetch_block_cached"]
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        self.__dict__.update(state)
        self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
            self._fetch_block
        )

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int) -> bytes:
        """
        Fetch the block of data for `block_number`.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        logger.info("BlockCache fetching block %d", block_number)
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        if start_block_number == end_block_number:
            block: bytes = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = []
            out.append(self._fetch_block_cached(start_block_number)[start_pos:])

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            for block_number in range(start_block_number + 1, end_block_number):
                out.append(self._fetch_block_cached(block_number))

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)


class BytesCache(BaseCache):
    """Cache which holds data in an in-memory bytes object

    Implements read-ahead by the block size, for semi-random reads progressing
    through the file.

    Parameters
    ----------
    trim: bool
        As we read more data, whether to discard the start of the buffer when
        we are more than a blocksize ahead of it.
    """

    name: ClassVar[str] = "bytes"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.cache = b""
        self.start: int | None = None
        self.end: int | None = None
        self.trim = trim

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        # TODO: only set start/end after fetch, in case it fails?
        # is this where retry logic might go?
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        if (
            self.start is not None
            and start >= self.start
            and self.end is not None
            and end < self.end
        ):
            # cache hit: we have all the required data
            offset = start - self.start
            return self.cache[offset : offset + end - start]

        if self.blocksize:
            bend = min(self.size, end + self.blocksize)
        else:
            bend = end

        if bend == start or start > self.size:
            return b""

        if (self.start is None or start < self.start) and (
            self.end is None or end > self.end
        ):
            # First read, or extending both before and after
            self.cache = self.fetcher(start, bend)
            self.start = start
        else:
            assert self.start is not None
            assert self.end is not None

            if start < self.start:
                if self.end is None or self.end - end > self.blocksize:
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    new = self.fetcher(start, self.start)
                    self.start = start
                    self.cache = new + self.cache
            elif self.end is not None and bend > self.end:
                if self.end > self.size:
                    pass
                elif end - self.end > self.blocksize:
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    new = self.fetcher(self.end, bend)
                    self.cache = self.cache + new

        self.end = self.start + len(self.cache)
        offset = start - self.start
        out = self.cache[offset : offset + end - start]
        if self.trim:
            num = (self.end - self.start) // (self.blocksize + 1)
            if num > 1:
                self.start += self.blocksize * num
                self.cache = self.cache[self.blocksize * num :]
        return out

    def __len__(self) -> int:
        return len(self.cache)


class AllBytes(BaseCache):
    """Cache entire contents of the file"""

    name: ClassVar[str] = "all"

    def __init__(
        self,
        blocksize: int | None = None,
        fetcher: Fetcher | None = None,
        size: int | None = None,
        data: bytes | None = None,
    ) -> None:
        super().__init__(blocksize, fetcher, size)  # type: ignore[arg-type]
        if data is None:
            data = self.fetcher(0, self.size)
        self.data = data

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        return self.data[start:stop]


class KnownPartsOfAFile(BaseCache):
    """
    Cache holding known file parts.

    Parameters
    ----------
    blocksize: int
        How far to read ahead in numbers of bytes
    fetcher: func
        Function of the form f(start, end) which gets bytes from remote as
        specified
    size: int
        How big this file is
    data: dict
        A dictionary mapping explicit `(start, stop)` file-offset tuples
        to known bytes.
    strict: bool, default True
        Whether to fetch reads that go beyond a known byte-range boundary.
        If `False`, any read that ends outside a known part will be zero
        padded. Note that zero padding will not be used for reads that
        begin outside a known byte-range.
    """

    name: ClassVar[str] = "parts"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        data: dict[tuple[int, int], bytes] = {},
        strict: bool = True,
        **_: Any,
    ):
        super().__init__(blocksize, fetcher, size)
        self.strict = strict

        # simple consolidation of contiguous blocks
        if data:
            old_offsets = sorted(data.keys())
            offsets = [old_offsets[0]]
            blocks = [data.pop(old_offsets[0])]
            for start, stop in old_offsets[1:]:
                start0, stop0 = offsets[-1]
                if start == stop0:
                    offsets[-1] = (start0, stop)
                    blocks[-1] += data.pop((start, stop))
                else:
                    offsets.append((start, stop))
                    blocks.append(data.pop((start, stop)))

            self.data = dict(zip(offsets, blocks))
        else:
            self.data = data

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        if start is None:
            start = 0
        if stop is None:
            stop = self.size

        out = b""
        for (loc0, loc1), data in self.data.items():
            # If self.strict=False, use zero-padded data
            # for reads beyond the end of a "known" buffer
            if loc0 <= start < loc1:
                off = start - loc0
                out = data[off : off + stop - start]
                if not self.strict or loc0 <= stop <= loc1:
                    # The request is within a known range, or
                    # it begins within a known range, and we
                    # are allowed to pad reads beyond the
                    # buffer with zero
                    out += b"\x00" * (stop - start - len(out))
                    return out
                else:
                    # The request ends outside a known range,
                    # and we are being "strict" about reads
                    # beyond the buffer
                    start = loc1
                    break

        # We only get here if there is a request outside the
        # known parts of the file. In an ideal world, this
        # should never happen
        if self.fetcher is None:
            # We cannot fetch the data, so raise an error
            raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
        # We can fetch the data, but should warn the user
        # that this may be slow
        warnings.warn(
            f"Read is outside the known file parts: {(start, stop)}. "
            f"IO/caching performance may be poor!"
        )
        logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
        return out + super()._fetch(start, stop)


class UpdatableLRU(Generic[P, T]):
    """
    Custom implementation of LRU cache that allows updating keys

    Used by BackgroundBlockCache
    """

    class CacheInfo(NamedTuple):
        hits: int
        misses: int
        maxsize: int
        currsize: int

    def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
        self._cache: OrderedDict[Any, T] = collections.OrderedDict()
        self._func = func
        self._max_size = max_size
        self._hits = 0
        self._misses = 0
        self._lock = threading.Lock()

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
        if kwargs:
            raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
        with self._lock:
            if args in self._cache:
                self._cache.move_to_end(args)
                self._hits += 1
                return self._cache[args]

        result = self._func(*args, **kwargs)

        with self._lock:
            self._cache[args] = result
            self._misses += 1
            if len(self._cache) > self._max_size:
                self._cache.popitem(last=False)

        return result

    def is_key_cached(self, *args: Any) -> bool:
        with self._lock:
            return args in self._cache

    def add_key(self, result: T, *args: Any) -> None:
        with self._lock:
            self._cache[args] = result
            if len(self._cache) > self._max_size:
                self._cache.popitem(last=False)

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        with self._lock:
            return self.CacheInfo(
                maxsize=self._max_size,
                currsize=len(self._cache),
                hits=self._hits,
                misses=self._misses,
            )


class BackgroundBlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks with pre-loading of
    the next block in the background.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored. If the
    next block is not in cache, it is loaded in a separate thread
    in a non-blocking way.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name: ClassVar[str] = "background"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)

        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number: int | None = None
        self._fetch_future: Future[bytes] | None = None
        self._fetch_future_lock = threading.Lock()

    def __repr__(self) -> str:
        return (
            f"<BackgroundBlockCache blocksize={self.blocksize}, "
            f"size={self.size}, nblocks={self.nblocks}>"
        )

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        state = self.__dict__
        del state["_fetch_block_cached"]
        del state["_thread_executor"]
        del state["_fetch_future_block_number"]
        del state["_fetch_future"]
        del state["_fetch_future_lock"]
        return state

    def __setstate__(self, state) -> None:
        self.__dict__.update(state)
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number = None
        self._fetch_future = None
        self._fetch_future_lock = threading.Lock()

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        fetch_future_block_number = None
        fetch_future = None
        with self._fetch_future_lock:
            # Background thread is running. Check whether we can or must join it.
            if self._fetch_future is not None:
                assert self._fetch_future_block_number is not None
                if self._fetch_future.done():
                    logger.info("BlockCache joined background fetch without waiting.")
                    self._fetch_block_cached.add_key(
                        self._fetch_future.result(), self._fetch_future_block_number
                    )
                    # Cleanup the fetch variables. Done with fetching the block.
                    self._fetch_future_block_number = None
                    self._fetch_future = None
                else:
                    # Must join if we need the block for the current fetch
                    must_join = bool(
                        start_block_number
                        <= self._fetch_future_block_number
                        <= end_block_number
                    )
                    if must_join:
                        # Copy to the local variables to release lock
                        # before waiting for result
                        fetch_future_block_number = self._fetch_future_block_number
                        fetch_future = self._fetch_future

                        # Cleanup the fetch variables. Have a local copy.
                        self._fetch_future_block_number = None
                        self._fetch_future = None

        # Need to wait for the future for the current read
        if fetch_future is not None:
            logger.info("BlockCache waiting for background fetch.")
            # Wait until result and put it in cache
            self._fetch_block_cached.add_key(
                fetch_future.result(), fetch_future_block_number
            )

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        # fetch next block in the background if nothing is running in the background,
        # the block is within the file and it is not already cached
        end_block_plus_1 = end_block_number + 1
        with self._fetch_future_lock:
            if (
                self._fetch_future is None
                and end_block_plus_1 <= self.nblocks
                and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
            ):
                self._fetch_future_block_number = end_block_plus_1
                self._fetch_future = self._thread_executor.submit(
                    self._fetch_block, end_block_plus_1, "async"
                )

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
        """
        Fetch the block of data for `block_number`.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        if start_block_number == end_block_number:
            block = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = []
            out.append(self._fetch_block_cached(start_block_number)[start_pos:])

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            for block_number in range(start_block_number + 1, end_block_number):
                out.append(self._fetch_block_cached(block_number))

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)


caches: dict[str | None, type[BaseCache]] = {
    # one custom case
    None: BaseCache,
}


def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
    """'Register' cache implementation.

    Parameters
    ----------
    clobber: bool, optional
        If set to True (default is False), allow overwriting an existing
        entry.

    Raises
    ------
    ValueError
    """
    name = cls.name
    if not clobber and name in caches:
        raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}")
    caches[name] = cls


for c in (
    BaseCache,
    MMapCache,
    BytesCache,
    ReadAheadCache,
    BlockCache,
    FirstChunkCache,
    AllBytes,
    KnownPartsOfAFile,
    BackgroundBlockCache,
):
    register_cache(c)
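All of the cache classes above share the ``Fetcher`` contract documented on ``BaseCache``: a callable mapping ``(start, end)`` to ``bytes``. As a minimal sketch of how a registered cache is exercised (normally ``AbstractBufferedFile`` drives this internally), the following feeds ``BlockCache`` from an in-memory stand-in for a remote file; the data, block size and read range are illustrative values, not part of the library:

from fsspec.caching import BlockCache

data = bytes(range(256)) * 40  # 10240 bytes standing in for remote file content

def fetcher(start: int, end: int) -> bytes:
    # Fetcher contract from BaseCache: return the bytes in [start, end)
    return data[start:end]

cache = BlockCache(blocksize=1024, fetcher=fetcher, size=len(data), maxblocks=4)
assert cache._fetch(100, 300) == data[100:300]  # served via a block-aligned fetch
print(cache.cache_info())  # hit/miss stats from the internal functools.lru_cache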
env-llmeval/lib/python3.10/site-packages/fsspec/compression.py
ADDED
@@ -0,0 +1,174 @@
"""Helper functions for a standard streaming compression API"""
from zipfile import ZipFile

import fsspec.utils
from fsspec.spec import AbstractBufferedFile


def noop_file(file, mode, **kwargs):
    return file


# TODO: files should also be available as contexts
# should be functions of the form func(infile, mode=, **kwargs) -> file-like
compr = {None: noop_file}


def register_compression(name, callback, extensions, force=False):
    """Register an "inferable" file compression type.

    Registers transparent file compression type for use with fsspec.open.
    Compression can be specified by name in open, or "infer"-ed for any files
    ending with the given extensions.

    Args:
        name: (str) The compression type name. Eg. "gzip".
        callback: A callable of form (infile, mode, **kwargs) -> file-like.
            Accepts an input file-like object, the target mode and kwargs.
            Returns a wrapped file-like object.
        extensions: (str, Iterable[str]) A file extension, or list of file
            extensions for which to infer this compression scheme. Eg. "gz".
        force: (bool) Force re-registration of compression type or extensions.

    Raises:
        ValueError: If name or extensions already registered, and not force.

    """
    if isinstance(extensions, str):
        extensions = [extensions]

    # Validate registration
    if name in compr and not force:
        raise ValueError(f"Duplicate compression registration: {name}")

    for ext in extensions:
        if ext in fsspec.utils.compressions and not force:
            raise ValueError(f"Duplicate compression file extension: {ext} ({name})")

    compr[name] = callback

    for ext in extensions:
        fsspec.utils.compressions[ext] = name


def unzip(infile, mode="rb", filename=None, **kwargs):
    if "r" not in mode:
        filename = filename or "file"
        z = ZipFile(infile, mode="w", **kwargs)
        fo = z.open(filename, mode="w")
        fo.close = lambda closer=fo.close: closer() or z.close()
        return fo
    z = ZipFile(infile)
    if filename is None:
        filename = z.namelist()[0]
    return z.open(filename, mode="r", **kwargs)


register_compression("zip", unzip, "zip")

try:
    from bz2 import BZ2File
except ImportError:
    pass
else:
    register_compression("bz2", BZ2File, "bz2")

try:  # pragma: no cover
    from isal import igzip

    def isal(infile, mode="rb", **kwargs):
        return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)

    register_compression("gzip", isal, "gz")
except ImportError:
    from gzip import GzipFile

    register_compression(
        "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
    )

try:
    from lzma import LZMAFile

    register_compression("lzma", LZMAFile, "lzma")
    register_compression("xz", LZMAFile, "xz")
except ImportError:
    pass

try:
    import lzmaffi

    register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
    register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
except ImportError:
    pass


class SnappyFile(AbstractBufferedFile):
    def __init__(self, infile, mode, **kwargs):
        import snappy

        super().__init__(
            fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
        )
        self.infile = infile
        if "r" in mode:
            self.codec = snappy.StreamDecompressor()
        else:
            self.codec = snappy.StreamCompressor()

    def _upload_chunk(self, final=False):
        self.buffer.seek(0)
        out = self.codec.add_chunk(self.buffer.read())
        self.infile.write(out)
        return True

    def seek(self, loc, whence=0):
        raise NotImplementedError("SnappyFile is not seekable")

    def seekable(self):
        return False

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        data = self.infile.read(end - start)
        return self.codec.decompress(data)


try:
    import snappy

    snappy.compress
    # Snappy may use the .sz file extension, but this is not part of the
    # standard implementation.
    register_compression("snappy", SnappyFile, [])

except (ImportError, NameError, AttributeError):
    pass

try:
    import lz4.frame

    register_compression("lz4", lz4.frame.open, "lz4")
except ImportError:
    pass

try:
    import zstandard as zstd

    def zstandard_file(infile, mode="rb"):
        if "r" in mode:
            cctx = zstd.ZstdDecompressor()
            return cctx.stream_reader(infile)
        else:
            cctx = zstd.ZstdCompressor(level=10)
            return cctx.stream_writer(infile)

    register_compression("zstd", zstandard_file, "zst")
except ImportError:
    pass


def available_compressions():
    """Return a list of the implemented compressions."""
    return list(compr)
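``register_compression`` is how every codec above gets wired in: the name goes into ``compr`` and each extension into ``fsspec.utils.compressions``, which is what ``compression="infer"`` consults. A short sketch of registering a hypothetical pass-through codec; the name ``identity2`` and the extension ``id2`` are invented for illustration:

import fsspec
from fsspec.compression import available_compressions, register_compression

def identity(infile, mode="rb", **kwargs):
    # pass-through "codec": hands back the underlying file object unchanged
    return infile

register_compression("identity2", identity, "id2")
assert "identity2" in available_compressions()

# "infer" resolves the .id2 extension to the codec registered above
with fsspec.open("memory://a.id2", "wb", compression="infer") as f:
    f.write(b"hello")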
env-llmeval/lib/python3.10/site-packages/fsspec/config.py
ADDED
@@ -0,0 +1,131 @@
from __future__ import annotations

import configparser
import json
import os
import warnings
from typing import Any

conf: dict[str, dict[str, Any]] = {}
default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)


def set_conf_env(conf_dict, envdict=os.environ):
    """Set config values from environment variables

    Looks for variables of the form ``FSSPEC_<protocol>`` and
    ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
    as a json dictionary and used to ``update`` the config of the
    corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
    attempt to convert the string value, but the kwarg keys will be lower-cased.

    The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
    ``FSSPEC_<protocol>`` ones.

    Parameters
    ----------
    conf_dict : dict(str, dict)
        This dict will be mutated
    envdict : dict-like(str, str)
        Source for the values - usually the real environment
    """
    kwarg_keys = []
    for key in envdict:
        if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
            if key.count("_") > 1:
                kwarg_keys.append(key)
                continue
            try:
                value = json.loads(envdict[key])
            except json.decoder.JSONDecodeError as ex:
                warnings.warn(
                    f"Ignoring environment variable {key} due to a parse failure: {ex}"
                )
            else:
                if isinstance(value, dict):
                    _, proto = key.split("_", 1)
                    conf_dict.setdefault(proto.lower(), {}).update(value)
                else:
                    warnings.warn(
                        f"Ignoring environment variable {key} due to not being a dict:"
                        f" {type(value)}"
                    )
        elif key.startswith("FSSPEC"):
            warnings.warn(
                f"Ignoring environment variable {key} due to having an unexpected name"
            )

    for key in kwarg_keys:
        _, proto, kwarg = key.split("_", 2)
        conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]


def set_conf_files(cdir, conf_dict):
    """Set config values from files

    Scans for INI and JSON files in the given directory, and uses their
    contents to set the config. In case of repeated values, later values
    win.

    In the case of INI files, all values are strings, and these will not
    be converted.

    Parameters
    ----------
    cdir : str
        Directory to search
    conf_dict : dict(str, dict)
        This dict will be mutated
    """
    if not os.path.isdir(cdir):
        return
    allfiles = sorted(os.listdir(cdir))
    for fn in allfiles:
        if fn.endswith(".ini"):
            ini = configparser.ConfigParser()
            ini.read(os.path.join(cdir, fn))
            for key in ini:
                if key == "DEFAULT":
                    continue
                conf_dict.setdefault(key, {}).update(dict(ini[key]))
        if fn.endswith(".json"):
            with open(os.path.join(cdir, fn)) as f:
                js = json.load(f)
            for key in js:
                conf_dict.setdefault(key, {}).update(dict(js[key]))


def apply_config(cls, kwargs, conf_dict=None):
    """Supply default values for kwargs when instantiating class

    Augments the passed kwargs, by finding entries in the config dict
    which match the class's ``.protocol`` attribute (one or more str)

    Parameters
    ----------
    cls : file system implementation
    kwargs : dict
    conf_dict : dict of dict
        Typically this is the global configuration
    Returns
    -------
    dict : the modified set of kwargs
    """
    if conf_dict is None:
        conf_dict = conf
    protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
    kw = {}
    for proto in protos:
        # default kwargs from the current state of the config
        if proto in conf_dict:
            kw.update(conf_dict[proto])
    # explicit kwargs always win
    kw.update(**kwargs)
    kwargs = kw
    return kwargs


set_conf_files(conf_dir, conf)
set_conf_env(conf)
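The two ``FSSPEC_*`` variable forms handled by ``set_conf_env`` behave differently: the bare protocol form is parsed as a JSON dict, while the per-kwarg form stays a raw string with a lower-cased key. A quick sketch against a supplied ``envdict`` instead of the real environment; the ``gcs`` values here are illustrative only:

from fsspec.config import set_conf_env

cfg = {}
set_conf_env(
    cfg,
    envdict={
        "FSSPEC_GCS": '{"token": "anon"}',    # JSON dict, merged under "gcs"
        "FSSPEC_GCS_REQUESTER_PAYS": "true",  # raw string, key lower-cased
    },
)
assert cfg == {"gcs": {"token": "anon", "requester_pays": "true"}}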
env-llmeval/lib/python3.10/site-packages/fsspec/conftest.py
ADDED
@@ -0,0 +1,55 @@
import os
import shutil
import subprocess
import sys
import time

import pytest

import fsspec
from fsspec.implementations.cached import CachingFileSystem


@pytest.fixture()
def m():
    """
    Fixture providing a memory filesystem.
    """
    m = fsspec.filesystem("memory")
    m.store.clear()
    m.pseudo_dirs.clear()
    m.pseudo_dirs.append("")
    try:
        yield m
    finally:
        m.store.clear()
        m.pseudo_dirs.clear()
        m.pseudo_dirs.append("")


@pytest.fixture
def ftp_writable(tmpdir):
    """
    Fixture providing a writable FTP filesystem.
    """
    pytest.importorskip("pyftpdlib")
    from fsspec.implementations.ftp import FTPFileSystem

    FTPFileSystem.clear_instance_cache()  # remove lingering connections
    CachingFileSystem.clear_instance_cache()
    d = str(tmpdir)
    with open(os.path.join(d, "out"), "wb") as f:
        f.write(b"hello" * 10000)
    P = subprocess.Popen(
        [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
    )
    try:
        time.sleep(1)
        yield "localhost", 2121, "user", "pass"
    finally:
        P.terminate()
        P.wait()
        try:
            shutil.rmtree(tmpdir)
        except Exception:
            pass
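A test module picks these fixtures up simply by naming them as parameters; pytest injects the prepared filesystem. A hypothetical test using the ``m`` fixture above (the path and payload are illustrative):

def test_memory_roundtrip(m):
    # `m` is the freshly-cleared memory filesystem from the fixture
    with m.open("/data.bin", "wb") as f:
        f.write(b"abc")
    assert m.cat("/data.bin") == b"abc"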
env-llmeval/lib/python3.10/site-packages/fsspec/core.py
ADDED
@@ -0,0 +1,710 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import io
|
4 |
+
import logging
|
5 |
+
import os
|
6 |
+
import re
|
7 |
+
from glob import has_magic
|
8 |
+
from pathlib import Path
|
9 |
+
|
10 |
+
# for backwards compat, we export cache things from here too
|
11 |
+
from .caching import ( # noqa: F401
|
12 |
+
BaseCache,
|
13 |
+
BlockCache,
|
14 |
+
BytesCache,
|
15 |
+
MMapCache,
|
16 |
+
ReadAheadCache,
|
17 |
+
caches,
|
18 |
+
)
|
19 |
+
from .compression import compr
|
20 |
+
from .registry import filesystem, get_filesystem_class
|
21 |
+
from .utils import (
|
22 |
+
_unstrip_protocol,
|
23 |
+
build_name_function,
|
24 |
+
infer_compression,
|
25 |
+
stringify_path,
|
26 |
+
)
|
27 |
+
|
28 |
+
logger = logging.getLogger("fsspec")
|
29 |
+
|
30 |
+
|
31 |
+
class OpenFile:
|
32 |
+
"""
|
33 |
+
File-like object to be used in a context
|
34 |
+
|
35 |
+
Can layer (buffered) text-mode and compression over any file-system, which
|
36 |
+
are typically binary-only.
|
37 |
+
|
38 |
+
These instances are safe to serialize, as the low-level file object
|
39 |
+
is not created until invoked using ``with``.
|
40 |
+
|
41 |
+
Parameters
|
42 |
+
----------
|
43 |
+
fs: FileSystem
|
44 |
+
The file system to use for opening the file. Should be a subclass or duck-type
|
45 |
+
with ``fsspec.spec.AbstractFileSystem``
|
46 |
+
path: str
|
47 |
+
Location to open
|
48 |
+
mode: str like 'rb', optional
|
49 |
+
Mode of the opened file
|
50 |
+
compression: str or None, optional
|
51 |
+
Compression to apply
|
52 |
+
encoding: str or None, optional
|
53 |
+
The encoding to use if opened in text mode.
|
54 |
+
errors: str or None, optional
|
55 |
+
How to handle encoding errors if opened in text mode.
|
56 |
+
newline: None or str
|
57 |
+
Passed to TextIOWrapper in text mode, how to handle line endings.
|
58 |
+
autoopen: bool
|
59 |
+
If True, calls open() immediately. Mostly used by pickle
|
60 |
+
pos: int
|
61 |
+
If given and autoopen is True, seek to this location immediately
|
62 |
+
"""
|
63 |
+
|
64 |
+
def __init__(
|
65 |
+
self,
|
66 |
+
fs,
|
67 |
+
path,
|
68 |
+
mode="rb",
|
69 |
+
compression=None,
|
70 |
+
encoding=None,
|
71 |
+
errors=None,
|
72 |
+
newline=None,
|
73 |
+
):
|
74 |
+
self.fs = fs
|
75 |
+
self.path = path
|
76 |
+
self.mode = mode
|
77 |
+
self.compression = get_compression(path, compression)
|
78 |
+
self.encoding = encoding
|
79 |
+
self.errors = errors
|
80 |
+
self.newline = newline
|
81 |
+
self.fobjects = []
|
82 |
+
|
83 |
+
def __reduce__(self):
|
84 |
+
return (
|
85 |
+
OpenFile,
|
86 |
+
(
|
87 |
+
self.fs,
|
88 |
+
self.path,
|
89 |
+
self.mode,
|
90 |
+
self.compression,
|
91 |
+
self.encoding,
|
92 |
+
self.errors,
|
93 |
+
self.newline,
|
94 |
+
),
|
95 |
+
)
|
96 |
+
|
97 |
+
def __repr__(self):
|
98 |
+
return f"<OpenFile '{self.path}'>"
|
99 |
+
|
100 |
+
def __enter__(self):
|
101 |
+
mode = self.mode.replace("t", "").replace("b", "") + "b"
|
102 |
+
|
103 |
+
f = self.fs.open(self.path, mode=mode)
|
104 |
+
|
105 |
+
self.fobjects = [f]
|
106 |
+
|
107 |
+
if self.compression is not None:
|
108 |
+
compress = compr[self.compression]
|
109 |
+
f = compress(f, mode=mode[0])
|
110 |
+
self.fobjects.append(f)
|
111 |
+
|
112 |
+
if "b" not in self.mode:
|
113 |
+
# assume, for example, that 'r' is equivalent to 'rt' as in builtin
|
114 |
+
f = PickleableTextIOWrapper(
|
115 |
+
f, encoding=self.encoding, errors=self.errors, newline=self.newline
|
116 |
+
)
|
117 |
+
self.fobjects.append(f)
|
118 |
+
|
119 |
+
return self.fobjects[-1]
|
120 |
+
|
121 |
+
def __exit__(self, *args):
|
122 |
+
self.close()
|
123 |
+
|
124 |
+
@property
|
125 |
+
def full_name(self):
|
126 |
+
return _unstrip_protocol(self.path, self.fs)
|
127 |
+
|
128 |
+
def open(self):
|
129 |
+
"""Materialise this as a real open file without context
|
130 |
+
|
131 |
+
The OpenFile object should be explicitly closed to avoid enclosed file
|
132 |
+
instances persisting. You must, therefore, keep a reference to the OpenFile
|
133 |
+
during the life of the file-like it generates.
|
134 |
+
"""
|
135 |
+
return self.__enter__()
|
136 |
+
|
137 |
+
def close(self):
|
138 |
+
"""Close all encapsulated file objects"""
|
139 |
+
for f in reversed(self.fobjects):
|
140 |
+
if "r" not in self.mode and not f.closed:
|
141 |
+
f.flush()
|
142 |
+
f.close()
|
143 |
+
self.fobjects.clear()
|
144 |
+
|
145 |
+
|
146 |
+
class OpenFiles(list):
|
147 |
+
"""List of OpenFile instances
|
148 |
+
|
149 |
+
Can be used in a single context, which opens and closes all of the
|
150 |
+
contained files. Normal list access to get the elements works as
|
151 |
+
normal.
|
152 |
+
|
153 |
+
A special case is made for caching filesystems - the files will
|
154 |
+
be down/uploaded together at the start or end of the context, and
|
155 |
+
this may happen concurrently, if the target filesystem supports it.
|
156 |
+
"""
|
157 |
+
|
158 |
+
def __init__(self, *args, mode="rb", fs=None):
|
159 |
+
self.mode = mode
|
160 |
+
self.fs = fs
|
161 |
+
self.files = []
|
162 |
+
super().__init__(*args)
|
163 |
+
|
164 |
+
def __enter__(self):
|
165 |
+
if self.fs is None:
|
166 |
+
raise ValueError("Context has already been used")
|
167 |
+
|
168 |
+
fs = self.fs
|
169 |
+
while True:
|
170 |
+
if hasattr(fs, "open_many"):
|
171 |
+
# check for concurrent cache download; or set up for upload
|
172 |
+
self.files = fs.open_many(self)
|
173 |
+
return self.files
|
174 |
+
if hasattr(fs, "fs") and fs.fs is not None:
|
175 |
+
fs = fs.fs
|
176 |
+
else:
|
177 |
+
break
|
178 |
+
return [s.__enter__() for s in self]
|
179 |
+
|
180 |
+
def __exit__(self, *args):
|
181 |
+
fs = self.fs
|
182 |
+
[s.__exit__(*args) for s in self]
|
183 |
+
if "r" not in self.mode:
|
184 |
+
while True:
|
185 |
+
if hasattr(fs, "open_many"):
|
186 |
+
# check for concurrent cache upload
|
187 |
+
fs.commit_many(self.files)
|
188 |
+
return
|
189 |
+
if hasattr(fs, "fs") and fs.fs is not None:
|
190 |
+
fs = fs.fs
|
191 |
+
else:
|
192 |
+
break
|
193 |
+
|
194 |
+
def __getitem__(self, item):
|
195 |
+
out = super().__getitem__(item)
|
196 |
+
if isinstance(item, slice):
|
197 |
+
return OpenFiles(out, mode=self.mode, fs=self.fs)
|
198 |
+
return out
|
199 |
+
|
200 |
+
def __repr__(self):
|
201 |
+
return f"<List of {len(self)} OpenFile instances>"
|
202 |
+
|
203 |
+
|
204 |
+
def open_files(
|
205 |
+
urlpath,
|
206 |
+
mode="rb",
|
207 |
+
compression=None,
|
208 |
+
encoding="utf8",
|
209 |
+
errors=None,
|
210 |
+
name_function=None,
|
211 |
+
num=1,
|
212 |
+
protocol=None,
|
213 |
+
newline=None,
|
214 |
+
auto_mkdir=True,
|
215 |
+
expand=True,
|
216 |
+
**kwargs,
|
217 |
+
):
|
218 |
+
"""Given a path or paths, return a list of ``OpenFile`` objects.
|
219 |
+
|
220 |
+
For writing, a str path must contain the "*" character, which will be filled
|
221 |
+
in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
|
222 |
+
|
223 |
+
For either reading or writing, can instead provide explicit list of paths.
|
224 |
+
|
225 |
+
Parameters
|
226 |
+
----------
|
227 |
+
urlpath: string or list
|
228 |
+
Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
|
229 |
+
to read from alternative filesystems. To read from multiple files you
|
230 |
+
can pass a globstring or a list of paths, with the caveat that they
|
231 |
+
must all have the same protocol.
|
232 |
+
mode: 'rb', 'wt', etc.
|
233 |
+
compression: string or None
|
234 |
+
If given, open file using compression codec. Can either be a compression
|
235 |
+
name (a key in ``fsspec.compression.compr``) or "infer" to guess the
|
236 |
+
compression from the filename suffix.
|
237 |
+
encoding: str
|
238 |
+
For text mode only
|
239 |
+
errors: None or str
|
240 |
+
Passed to TextIOWrapper in text mode
|
241 |
+
name_function: function or None
|
242 |
+
if opening a set of files for writing, those files do not yet exist,
|
243 |
+
so we need to generate their names by formatting the urlpath for
|
244 |
+
each sequence number
|
245 |
+
num: int [1]
|
246 |
+
if writing mode, number of files we expect to create (passed to
|
247 |
+
name+function)
|
248 |
+
protocol: str or None
|
249 |
+
If given, overrides the protocol found in the URL.
|
250 |
+
newline: bytes or None
|
251 |
+
Used for line terminator in text mode. If None, uses system default;
|
252 |
+
if blank, uses no translation.
|
253 |
+
auto_mkdir: bool (True)
|
254 |
+
If in write mode, this will ensure the target directory exists before
|
255 |
+
writing, by calling ``fs.mkdirs(exist_ok=True)``.
|
256 |
+
expand: bool
|
257 |
+
**kwargs: dict
|
258 |
+
Extra options that make sense to a particular storage connection, e.g.
|
259 |
+
host, port, username, password, etc.
|
260 |
+
|
261 |
+
Examples
|
262 |
+
--------
|
263 |
+
>>> files = open_files('2015-*-*.csv') # doctest: +SKIP
|
264 |
+
>>> files = open_files(
|
265 |
+
... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
|
266 |
+
... ) # doctest: +SKIP
|
267 |
+
|
268 |
+
Returns
|
269 |
+
-------
|
270 |
+
An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
|
271 |
+
be used as a single context
|
272 |
+
|
273 |
+
Notes
|
274 |
+
-----
|
275 |
+
For a full list of the available protocols and the implementations that
|
276 |
+
they map across to see the latest online documentation:
|
277 |
+
|
278 |
+
- For implementations built into ``fsspec`` see
|
279 |
+
https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
|
280 |
+
- For implementations in separate packages see
|
281 |
+
https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
|
282 |
+
"""
|
283 |
+
fs, fs_token, paths = get_fs_token_paths(
|
284 |
+
urlpath,
|
285 |
+
mode,
|
286 |
+
num=num,
|
287 |
+
name_function=name_function,
|
288 |
+
storage_options=kwargs,
|
289 |
+
protocol=protocol,
|
290 |
+
expand=expand,
|
291 |
+
)
|
292 |
+
if fs.protocol == "file":
|
293 |
+
fs.auto_mkdir = auto_mkdir
|
294 |
+
elif "r" not in mode and auto_mkdir:
|
295 |
+
parents = {fs._parent(path) for path in paths}
|
296 |
+
for parent in parents:
|
297 |
+
try:
|
298 |
+
fs.makedirs(parent, exist_ok=True)
|
299 |
+
except PermissionError:
|
300 |
+
pass
|
301 |
+
return OpenFiles(
|
302 |
+
[
|
303 |
+
OpenFile(
|
304 |
+
fs,
|
305 |
+
path,
|
306 |
+
mode=mode,
|
307 |
+
compression=compression,
|
308 |
+
encoding=encoding,
|
309 |
+
errors=errors,
|
310 |
+
newline=newline,
|
311 |
+
)
|
312 |
+
for path in paths
|
313 |
+
],
|
314 |
+
mode=mode,
|
315 |
+
fs=fs,
|
316 |
+
)
|
317 |
+
|
318 |
+
|
319 |
+
def _un_chain(path, kwargs):
|
320 |
+
x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
|
321 |
+
bits = (
|
322 |
+
[p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
|
323 |
+
if "::" in path
|
324 |
+
else [path]
|
325 |
+
)
|
326 |
+
# [[url, protocol, kwargs], ...]
|
327 |
+
out = []
|
328 |
+
previous_bit = None
|
329 |
+
kwargs = kwargs.copy()
|
330 |
+
for bit in reversed(bits):
|
331 |
+
protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
|
332 |
+
cls = get_filesystem_class(protocol)
|
333 |
+
extra_kwargs = cls._get_kwargs_from_urls(bit)
|
334 |
+
kws = kwargs.pop(protocol, {})
|
335 |
+
if bit is bits[0]:
|
336 |
+
kws.update(kwargs)
|
337 |
+
kw = dict(**extra_kwargs, **kws)
|
338 |
+
bit = cls._strip_protocol(bit)
|
339 |
+
if (
|
340 |
+
protocol in {"blockcache", "filecache", "simplecache"}
|
341 |
+
and "target_protocol" not in kw
|
342 |
+
):
|
343 |
+
bit = previous_bit
|
344 |
+
out.append((bit, protocol, kw))
|
345 |
+
previous_bit = bit
|
346 |
+
out = list(reversed(out))
|
347 |
+
return out
|
348 |
+
|
349 |
+
|
350 |
+
def url_to_fs(url, **kwargs):
    """
    Turn fully-qualified and potentially chained URL into filesystem instance

    Parameters
    ----------
    url : str
        The fsspec-compatible URL
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Returns
    -------
    filesystem : FileSystem
        The new filesystem discovered from ``url`` and created with
        ``**kwargs``.
    urlpath : str
        The file-systems-specific URL for ``url``.
    """
    # non-FS arguments that appear in fsspec.open()
    # inspect could keep this in sync with open()'s signature
    known_kwargs = {
        "compression",
        "encoding",
        "errors",
        "expand",
        "mode",
        "name_function",
        "newline",
        "num",
    }
    kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
    chain = _un_chain(url, kwargs)
    inkwargs = {}
    # Reverse iterate the chain, creating a nested target_* structure
    for i, ch in enumerate(reversed(chain)):
        urls, protocol, kw = ch
        if i == len(chain) - 1:
            inkwargs = dict(**kw, **inkwargs)
            continue
        inkwargs["target_options"] = dict(**kw, **inkwargs)
        inkwargs["target_protocol"] = protocol
        inkwargs["fo"] = urls
    urlpath, protocol, _ = chain[0]
    fs = filesystem(protocol, **inkwargs)
    return fs, urlpath

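A quick sketch of ``url_to_fs`` in use; the S3 URL is hypothetical, and ``anon=True`` is just one example of a backend-specific option (requires s3fs):

import fsspec

fs, path = fsspec.core.url_to_fs("s3://bucket/data/file.csv", anon=True)
print(type(fs).__name__)  # e.g. "S3FileSystem"
print(path)               # "bucket/data/file.csv" - the protocol is stripped
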
def open(
    urlpath,
    mode="rb",
    compression=None,
    encoding="utf8",
    errors=None,
    protocol=None,
    newline=None,
    **kwargs,
):
    """Given a path or paths, return one ``OpenFile`` object.

    Parameters
    ----------
    urlpath: string or list
        Absolute or relative filepath. Prefix with a protocol like ``s3://``
        to read from alternative filesystems. Should not include glob
        character(s).
    mode: 'rb', 'wt', etc.
    compression: string or None
        If given, open file using compression codec. Can either be a compression
        name (a key in ``fsspec.compression.compr``) or "infer" to guess the
        compression from the filename suffix.
    encoding: str
        For text mode only
    errors: None or str
        Passed to TextIOWrapper in text mode
    protocol: str or None
        If given, overrides the protocol found in the URL.
    newline: bytes or None
        Used for line terminator in text mode. If None, uses system default;
        if blank, uses no translation.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
    >>> openfile = open(
    ...     's3://bucket/2015-01-01.csv.gz', compression='gzip'
    ... )  # doctest: +SKIP
    >>> with openfile as f:
    ...     df = pd.read_csv(f)  # doctest: +SKIP
    ...

    Returns
    -------
    ``OpenFile`` object.

    Notes
    -----
    For a full list of the available protocols and the implementations that
    they map across to, see the latest online documentation:

    - For implementations built into ``fsspec`` see
      https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
    - For implementations in separate packages see
      https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
    """
    out = open_files(
        urlpath=[urlpath],
        mode=mode,
        compression=compression,
        encoding=encoding,
        errors=errors,
        protocol=protocol,
        newline=newline,
        expand=False,
        **kwargs,
    )
    if not out:
        raise FileNotFoundError(urlpath)
    return out[0]

def open_local(
    url: str | list[str] | Path | list[Path],
    mode: str = "rb",
    **storage_options: dict,
) -> str | list[str]:
    """Open file(s) which can be resolved to local

    For files which either are local, or get downloaded upon open
    (e.g., by file caching)

    Parameters
    ----------
    url: str or list(str)
    mode: str
        Must be read mode
    storage_options:
        passed on to the FS, or used by open_files (e.g., compression)
    """
    if "r" not in mode:
        raise ValueError("Can only ensure local files when reading")
    of = open_files(url, mode=mode, **storage_options)
    if not getattr(of[0].fs, "local_file", False):
        raise ValueError(
            "open_local can only be used on a filesystem which"
            " has attribute local_file=True"
        )
    with of as files:
        paths = [f.name for f in files]
    if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
        return paths[0]
    return paths

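``open_local`` pairs naturally with the caching filesystems, since those expose downloaded copies as real local paths. A sketch, assuming a hypothetical remote URL and that the HTTP backend (aiohttp) is installed:

import fsspec

# "simplecache::" downloads on open, so the result can be handed to code
# that only accepts plain local paths (simplecache has local_file=True).
local_path = fsspec.open_local(
    "simplecache::https://example.com/data.csv",
    simplecache={"cache_storage": "/tmp/fsspec-cache"},
)
print(local_path)  # a file under /tmp/fsspec-cache
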
def get_compression(urlpath, compression):
    if compression == "infer":
        compression = infer_compression(urlpath)
    if compression is not None and compression not in compr:
        raise ValueError(f"Compression type {compression} not supported")
    return compression


def split_protocol(urlpath):
    """Return protocol, path pair"""
    urlpath = stringify_path(urlpath)
    if "://" in urlpath:
        protocol, path = urlpath.split("://", 1)
        if len(protocol) > 1:
            # excludes Windows paths
            return protocol, path
    if urlpath.startswith("data:"):
        return urlpath.split(":", 1)
    return None, urlpath


def strip_protocol(urlpath):
    """Return only path part of full URL, according to appropriate backend"""
    protocol, _ = split_protocol(urlpath)
    cls = get_filesystem_class(protocol)
    return cls._strip_protocol(urlpath)

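The ``len(protocol) > 1`` check above is what keeps single-letter Windows drive designators from being mistaken for protocols; a short illustration:

from fsspec.core import split_protocol

print(split_protocol("s3://bucket/key"))    # ('s3', 'bucket/key')
print(split_protocol("C://temp/file.txt"))  # (None, 'C://temp/file.txt'), one-letter "protocol"
print(split_protocol("relative/path.txt"))  # (None, 'relative/path.txt')
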
def expand_paths_if_needed(paths, mode, num, fs, name_function):
    """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
    in them (read mode).

    :param paths: list of paths
    mode: str
        Mode in which to open files.
    num: int
        If opening in writing mode, number of files we expect to create.
    fs: filesystem object
    name_function: callable
        If opening in writing mode, this callable is used to generate path
        names. Names are generated for each partition by
        ``urlpath.replace('*', name_function(partition_index))``.
    :return: list of paths
    """
    expanded_paths = []
    paths = list(paths)

    if "w" in mode:  # write mode
        if sum([1 for p in paths if "*" in p]) > 1:
            raise ValueError(
                "When writing data, only one filename mask can be specified."
            )
        num = max(num, len(paths))

        for curr_path in paths:
            if "*" in curr_path:
                # expand using name_function
                expanded_paths.extend(_expand_paths(curr_path, name_function, num))
            else:
                expanded_paths.append(curr_path)
        # if we generated more paths than asked for, trim the list
        if len(expanded_paths) > num:
            expanded_paths = expanded_paths[:num]

    else:  # read mode
        for curr_path in paths:
            if has_magic(curr_path):
                # expand using glob
                expanded_paths.extend(fs.glob(curr_path))
            else:
                expanded_paths.append(curr_path)

    return expanded_paths

def get_fs_token_paths(
    urlpath,
    mode="rb",
    num=1,
    name_function=None,
    storage_options=None,
    protocol=None,
    expand=True,
):
    """Filesystem, deterministic token, and paths from a urlpath and options.

    Parameters
    ----------
    urlpath: string or iterable
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    mode: str, optional
        Mode in which to open files.
    num: int, optional
        If opening in writing mode, number of files we expect to create.
    name_function: callable, optional
        If opening in writing mode, this callable is used to generate path
        names. Names are generated for each partition by
        ``urlpath.replace('*', name_function(partition_index))``.
    storage_options: dict, optional
        Additional keywords to pass to the filesystem class.
    protocol: str or None
        To override the protocol specifier in the URL
    expand: bool
        Expand string paths for writing, assuming the path is a directory
    """
    if isinstance(urlpath, (list, tuple, set)):
        if not urlpath:
            raise ValueError("empty urlpath sequence")
        urlpath0 = stringify_path(list(urlpath)[0])
    else:
        urlpath0 = stringify_path(urlpath)
    storage_options = storage_options or {}
    if protocol:
        storage_options["protocol"] = protocol
    chain = _un_chain(urlpath0, storage_options or {})
    inkwargs = {}
    # Reverse iterate the chain, creating a nested target_* structure
    for i, ch in enumerate(reversed(chain)):
        urls, nested_protocol, kw = ch
        if i == len(chain) - 1:
            inkwargs = dict(**kw, **inkwargs)
            continue
        inkwargs["target_options"] = dict(**kw, **inkwargs)
        inkwargs["target_protocol"] = nested_protocol
        inkwargs["fo"] = urls
    paths, protocol, _ = chain[0]
    fs = filesystem(protocol, **inkwargs)
    if isinstance(urlpath, (list, tuple, set)):
        pchains = [
            _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
        ]
        if len({pc[1] for pc in pchains}) > 1:
            raise ValueError(f"Protocol mismatch getting fs from {urlpath}")
        paths = [pc[0] for pc in pchains]
    else:
        paths = fs._strip_protocol(paths)
    if isinstance(paths, (list, tuple, set)):
        paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
    else:
        if "w" in mode and expand:
            paths = _expand_paths(paths, name_function, num)
        elif "x" in mode and expand:
            paths = _expand_paths(paths, name_function, num)
        elif "*" in paths:
            paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
        else:
            paths = [paths]

    return fs, fs._fs_token, paths

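A sketch of what ``get_fs_token_paths`` returns for a write-mode glob; local paths, so it runs anywhere (the output shown assumes a POSIX layout, and the token itself is an opaque hash):

from fsspec.core import get_fs_token_paths

fs, token, paths = get_fs_token_paths("/tmp/out/part-*.csv", mode="wb", num=3)
print(paths)
# ['/tmp/out/part-0.csv', '/tmp/out/part-1.csv', '/tmp/out/part-2.csv']
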
def _expand_paths(path, name_function, num):
    if isinstance(path, str):
        if path.count("*") > 1:
            raise ValueError("Output path spec must contain exactly one '*'.")
        elif "*" not in path:
            path = os.path.join(path, "*.part")

        if name_function is None:
            name_function = build_name_function(num - 1)

        paths = [path.replace("*", name_function(i)) for i in range(num)]
        if paths != sorted(paths):
            logger.warning(
                "In order to preserve order between partitions"
                " paths created with ``name_function`` should "
                "sort to partition order"
            )
    elif isinstance(path, (tuple, list)):
        assert len(path) == num
        paths = list(path)
    else:
        raise ValueError(
            "Path should be either\n"
            "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
            "2. A directory: 'foo/'\n"
            "3. A path with a '*' in it: 'foo.*.json'"
        )
    return paths

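The sorting warning in ``_expand_paths`` is why custom name functions usually zero-pad; a small sketch (the width of 5 is an arbitrary choice):

from fsspec.core import _expand_paths

# zero-padding keeps lexicographic order equal to partition order
name_function = lambda i: f"{i:05d}"
print(_expand_paths("out-*.json", name_function, 3))
# ['out-00000.json', 'out-00001.json', 'out-00002.json']
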
class PickleableTextIOWrapper(io.TextIOWrapper):
    """TextIOWrapper cannot be pickled. This solves it.

    Requires that ``buffer`` be pickleable, which all instances of
    AbstractBufferedFile are.
    """

    def __init__(
        self,
        buffer,
        encoding=None,
        errors=None,
        newline=None,
        line_buffering=False,
        write_through=False,
    ):
        self.args = buffer, encoding, errors, newline, line_buffering, write_through
        super().__init__(*self.args)

    def __reduce__(self):
        return PickleableTextIOWrapper, self.args
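The ``__reduce__`` trick above simply re-runs ``__init__`` with the saved arguments on unpickling, so the wrapper is pickleable whenever its buffer is. A toy round-trip with a hypothetical pickleable stand-in buffer (real use would wrap an AbstractBufferedFile):

import io
import pickle
from fsspec.core import PickleableTextIOWrapper

class ReplayBuffer(io.BytesIO):
    # toy stand-in: remembers its construction args so it can be re-created
    def __init__(self, data=b""):
        super().__init__(data)
        self._data = data

    def __reduce__(self):
        return ReplayBuffer, (self._data,)

f = PickleableTextIOWrapper(ReplayBuffer(b"hello\n"))
clone = pickle.loads(pickle.dumps(f))
print(clone.read())  # "hello\n" - the clone wraps a fresh copy of the buffer
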
env-llmeval/lib/python3.10/site-packages/fsspec/dircache.py
ADDED
@@ -0,0 +1,98 @@
import time
from collections.abc import MutableMapping
from functools import lru_cache


class DirCache(MutableMapping):
    """
    Caching of directory listings, in a structure like::

        {"path0": [
            {"name": "path0/file0",
             "size": 123,
             "type": "file",
             ...
            },
            {"name": "path0/file1",
            },
            ...
            ],
         "path1": [...]
        }

    Parameters to this class control listing expiry or indeed turn
    caching off
    """

    def __init__(
        self,
        use_listings_cache=True,
        listings_expiry_time=None,
        max_paths=None,
        **kwargs,
    ):
        """

        Parameters
        ----------
        use_listings_cache: bool
            If False, this cache never returns items, but always reports KeyError,
            and setting items has no effect
        listings_expiry_time: int or float (optional)
            Time in seconds that a listing is considered valid. If None,
            listings do not expire.
        max_paths: int (optional)
            The number of most recent listings that are considered valid; 'recent'
            refers to when the entry was set.
        """
        self._cache = {}
        self._times = {}
        if max_paths:
            self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
        self.use_listings_cache = use_listings_cache
        self.listings_expiry_time = listings_expiry_time
        self.max_paths = max_paths

    def __getitem__(self, item):
        if self.listings_expiry_time is not None:
            if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
                del self._cache[item]
        if self.max_paths:
            self._q(item)
        return self._cache[item]  # maybe raises KeyError

    def clear(self):
        self._cache.clear()

    def __len__(self):
        return len(self._cache)

    def __contains__(self, item):
        try:
            self[item]
            return True
        except KeyError:
            return False

    def __setitem__(self, key, value):
        if not self.use_listings_cache:
            return
        if self.max_paths:
            self._q(key)
        self._cache[key] = value
        if self.listings_expiry_time is not None:
            self._times[key] = time.time()

    def __delitem__(self, key):
        del self._cache[key]

    def __iter__(self):
        entries = list(self._cache)

        return (k for k in entries if k in self)

    def __reduce__(self):
        return (
            DirCache,
            (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
        )
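A small sketch of DirCache's expiry behaviour in isolation; the listing value is a made-up stand-in for what a real filesystem would store from ``ls()``:

import time
from fsspec.dircache import DirCache

cache = DirCache(listings_expiry_time=0.1)
cache["bucket/path"] = [{"name": "bucket/path/file0", "type": "file"}]
print("bucket/path" in cache)  # True while the listing is fresh
time.sleep(0.2)
print("bucket/path" in cache)  # False - the entry expired and was evicted
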
env-llmeval/lib/python3.10/site-packages/fsspec/exceptions.py
ADDED
@@ -0,0 +1,17 @@
"""
fsspec user-defined exception classes
"""
import asyncio


class BlocksizeMismatchError(ValueError):
    """
    Raised when a cached file is opened with a different blocksize than it was
    written with
    """


class FSTimeoutError(asyncio.TimeoutError):
    """
    Raised when an fsspec function call times out
    """
env-llmeval/lib/python3.10/site-packages/fsspec/fuse.py
ADDED
@@ -0,0 +1,324 @@
import argparse
import logging
import os
import stat
import threading
import time
from errno import EIO, ENOENT

from fuse import FUSE, FuseOSError, LoggingMixIn, Operations

from fsspec import __version__
from fsspec.core import url_to_fs

logger = logging.getLogger("fsspec.fuse")


class FUSEr(Operations):
    def __init__(self, fs, path, ready_file=False):
        self.fs = fs
        self.cache = {}
        self.root = path.rstrip("/") + "/"
        self.counter = 0
        logger.info("Starting FUSE at %s", path)
        self._ready_file = ready_file

    def getattr(self, path, fh=None):
        logger.debug("getattr %s", path)
        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
            return {"type": "file", "st_size": 5}

        path = "".join([self.root, path.lstrip("/")]).rstrip("/")
        try:
            info = self.fs.info(path)
        except FileNotFoundError:
            raise FuseOSError(ENOENT)

        data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
        perm = info.get("mode", 0o777)

        if info["type"] != "file":
            data["st_mode"] = stat.S_IFDIR | perm
            data["st_size"] = 0
            data["st_blksize"] = 0
        else:
            data["st_mode"] = stat.S_IFREG | perm
            data["st_size"] = info["size"]
            data["st_blksize"] = 5 * 2**20
            data["st_nlink"] = 1
        data["st_atime"] = info["atime"] if "atime" in info else time.time()
        data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
        data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
        return data

    def readdir(self, path, fh):
        logger.debug("readdir %s", path)
        path = "".join([self.root, path.lstrip("/")])
        files = self.fs.ls(path, False)
        files = [os.path.basename(f.rstrip("/")) for f in files]
        return [".", ".."] + files

    def mkdir(self, path, mode):
        path = "".join([self.root, path.lstrip("/")])
        self.fs.mkdir(path)
        return 0

    def rmdir(self, path):
        path = "".join([self.root, path.lstrip("/")])
        self.fs.rmdir(path)
        return 0

    def read(self, path, size, offset, fh):
        logger.debug("read %s", (path, size, offset))
        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
            # status indicator
            return b"ready"

        f = self.cache[fh]
        f.seek(offset)
        out = f.read(size)
        return out

    def write(self, path, data, offset, fh):
        logger.debug("write %s", (path, offset))
        f = self.cache[fh]
        f.seek(offset)
        f.write(data)
        return len(data)

    def create(self, path, flags, fi=None):
        logger.debug("create %s", (path, flags))
        fn = "".join([self.root, path.lstrip("/")])
        self.fs.touch(fn)  # OS will want to get attributes immediately
        f = self.fs.open(fn, "wb")
        self.cache[self.counter] = f
        self.counter += 1
        return self.counter - 1

    def open(self, path, flags):
        logger.debug("open %s", (path, flags))
        fn = "".join([self.root, path.lstrip("/")])
        if flags % 2 == 0:
            # read
            mode = "rb"
        else:
            # write/create
            mode = "wb"
        self.cache[self.counter] = self.fs.open(fn, mode)
        self.counter += 1
        return self.counter - 1

    def truncate(self, path, length, fh=None):
        fn = "".join([self.root, path.lstrip("/")])
        if length != 0:
            raise NotImplementedError
        # maybe should be no-op since open with write sets size to zero anyway
        self.fs.touch(fn)

    def unlink(self, path):
        fn = "".join([self.root, path.lstrip("/")])
        try:
            self.fs.rm(fn, False)
        except (OSError, FileNotFoundError):
            raise FuseOSError(EIO)

    def release(self, path, fh):
        try:
            if fh in self.cache:
                f = self.cache[fh]
                f.close()
                self.cache.pop(fh)
        except Exception as e:
            print(e)
        return 0

    def chmod(self, path, mode):
        if hasattr(self.fs, "chmod"):
            path = "".join([self.root, path.lstrip("/")])
            return self.fs.chmod(path, mode)
        raise NotImplementedError

def run(
    fs,
    path,
    mount_point,
    foreground=True,
    threads=False,
    ready_file=False,
    ops_class=FUSEr,
):
    """Mount stuff in a local directory

    This uses fusepy to make it appear as if a given path on an fsspec
    instance is in fact resident within the local file-system.

    This requires that fusepy be installed, and that FUSE be available on
    the system (typically requiring a package to be installed with
    apt, yum, brew, etc.).

    Parameters
    ----------
    fs: file-system instance
        From one of the compatible implementations
    path: str
        Location on that file-system to regard as the root directory to
        mount. Note that you typically should include the terminating "/"
        character.
    mount_point: str
        An empty directory on the local file-system where the contents of
        the remote path will appear.
    foreground: bool
        Whether or not calling this function will block. Operation will
        typically be more stable if True.
    threads: bool
        Whether or not to create threads when responding to file operations
        within the mounted directory. Operation will typically be more
        stable if False.
    ready_file: bool
        Whether the FUSE process is ready. The ``.fuse_ready`` file will
        exist in the ``mount_point`` directory if True. Debugging purpose.
    ops_class: FUSEr or Subclass of FUSEr
        To override the default behavior of FUSEr. For example, logging
        to file.

    """
    func = lambda: FUSE(
        ops_class(fs, path, ready_file=ready_file),
        mount_point,
        nothreads=not threads,
        foreground=foreground,
    )
    if not foreground:
        th = threading.Thread(target=func)
        th.daemon = True
        th.start()
        return th
    else:  # pragma: no cover
        try:
            func()
        except KeyboardInterrupt:
            pass


def main(args):
    """Mount filesystem from chained URL to MOUNT_POINT.

    Examples:

    python3 -m fsspec.fuse memory /usr/share /tmp/mem

    python3 -m fsspec.fuse local /tmp/source /tmp/local \\
            -l /tmp/fsspecfuse.log

    You can also mount chained-URLs and use special settings:

    python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
            / /tmp/zip \\
            -o 'filecache-cache_storage=/tmp/simplecache'

    You can specify the type of the setting by using `[int]` or `[bool]`,
    (`true`, `yes`, `1` represents the Boolean value `True`):

    python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
            /historic/packages/RPMS /tmp/ftp \\
            -o 'simplecache-cache_storage=/tmp/simplecache' \\
            -o 'simplecache-check_files=false[bool]' \\
            -o 'ftp-listings_expiry_time=60[int]' \\
            -o 'ftp-username=anonymous' \\
            -o 'ftp-password=xieyanbo'
    """

    class RawDescriptionArgumentParser(argparse.ArgumentParser):
        def format_help(self):
            usage = super().format_help()
            parts = usage.split("\n\n")
            parts[1] = self.description.rstrip()
            return "\n\n".join(parts)

    parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument("url", type=str, help="fs url")
    parser.add_argument("source_path", type=str, help="source directory in fs")
    parser.add_argument("mount_point", type=str, help="local directory")
    parser.add_argument(
        "-o",
        "--option",
        action="append",
        help="Any options of protocol included in the chained URL",
    )
    parser.add_argument(
        "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
    )
    parser.add_argument(
        "-f",
        "--foreground",
        action="store_false",
        help="Running in foreground or not (Default: False)",
    )
    parser.add_argument(
        "-t",
        "--threads",
        action="store_false",
        help="Running with threads support (Default: False)",
    )
    parser.add_argument(
        "-r",
        "--ready-file",
        action="store_false",
        help="The `.fuse_ready` file will exist after FUSE is ready. "
        "(Debugging purpose, Default: False)",
    )
    args = parser.parse_args(args)

    kwargs = {}
    for item in args.option or []:
        key, sep, value = item.partition("=")
        if not sep:
            parser.error(message=f"Wrong option: {item!r}")
        val = value.lower()
        if val.endswith("[int]"):
            value = int(value[: -len("[int]")])
        elif val.endswith("[bool]"):
            value = val[: -len("[bool]")] in ["1", "yes", "true"]

        if "-" in key:
            fs_name, setting_name = key.split("-", 1)
            if fs_name in kwargs:
                kwargs[fs_name][setting_name] = value
            else:
                kwargs[fs_name] = {setting_name: value}
        else:
            kwargs[key] = value

    if args.log_file:
        logging.basicConfig(
            level=logging.DEBUG,
            filename=args.log_file,
            format="%(asctime)s %(message)s",
        )

        class LoggingFUSEr(FUSEr, LoggingMixIn):
            pass

        fuser = LoggingFUSEr
    else:
        fuser = FUSEr

    fs, url_path = url_to_fs(args.url, **kwargs)
    logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
    run(
        fs,
        args.source_path,
        args.mount_point,
        foreground=args.foreground,
        threads=args.threads,
        ready_file=args.ready_file,
        ops_class=fuser,
    )


if __name__ == "__main__":
    import sys

    main(sys.argv[1:])
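A sketch of mounting an in-memory filesystem in the background; this assumes fusepy plus a working system FUSE install, and that the (hypothetical) mount point is an existing empty directory:

import fsspec
from fsspec.fuse import run

fs = fsspec.filesystem("memory")
fs.pipe("/data/hello.txt", b"hello")  # seed the memory filesystem

# foreground=False returns the daemon thread running the FUSE loop
th = run(fs, "/data/", "/mnt/mem", foreground=False)
# after mounting, /mnt/mem/hello.txt reads like an ordinary local file
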
env-llmeval/lib/python3.10/site-packages/fsspec/generic.py
ADDED
@@ -0,0 +1,403 @@
from __future__ import annotations

import inspect
import logging
import os
import shutil
import uuid
from typing import Optional

from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper
from .callbacks import DEFAULT_CALLBACK
from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs

_generic_fs = {}
logger = logging.getLogger("fsspec.generic")


def set_generic_fs(protocol, **storage_options):
    _generic_fs[protocol] = filesystem(protocol, **storage_options)


default_method = "default"


def _resolve_fs(url, method=None, protocol=None, storage_options=None):
    """Pick instance of backend FS"""
    method = method or default_method
    protocol = protocol or split_protocol(url)[0]
    storage_options = storage_options or {}
    if method == "default":
        return filesystem(protocol)
    if method == "generic":
        return _generic_fs[protocol]
    if method == "current":
        cls = get_filesystem_class(protocol)
        return cls.current()
    if method == "options":
        fs, _ = url_to_fs(url, **storage_options.get(protocol, {}))
        return fs
    raise ValueError(f"Unknown FS resolution method: {method}")

def rsync(
    source,
    destination,
    delete_missing=False,
    source_field="size",
    dest_field="size",
    update_cond="different",
    inst_kwargs=None,
    fs=None,
    **kwargs,
):
    """Sync files between two directory trees

    (experimental)

    Parameters
    ----------
    source: str
        Root of the directory tree to take files from. This must be a directory, but
        do not include any terminating "/" character
    destination: str
        Root path to copy into. The contents of this location should be
        identical to the contents of ``source`` when done. This will be made a
        directory, and the terminal "/" should not be included.
    delete_missing: bool
        If there are paths in the destination that don't exist in the
        source and this is True, delete them. Otherwise, leave them alone.
    source_field: str | callable
        If ``update_cond`` is "different", this is the key in the info
        of source files to consider for difference. May be a function of the
        info dict.
    dest_field: str | callable
        If ``update_cond`` is "different", this is the key in the info
        of destination files to consider for difference. May be a function of
        the info dict.
    update_cond: "different"|"always"|"never"
        If "always", every file is copied, regardless of whether it exists in
        the destination. If "never", files that exist in the destination are
        not copied again. If "different" (default), only copy if the info
        fields given by ``source_field`` and ``dest_field`` (usually "size")
        are different. Other comparisons may be added in the future.
    inst_kwargs: dict|None
        If ``fs`` is None, use this set of keyword arguments to make a
        GenericFileSystem instance
    fs: GenericFileSystem|None
        Instance to use if explicitly given. The instance defines how to
        make downstream file system instances from paths.
    """
    fs = fs or GenericFileSystem(**(inst_kwargs or {}))
    source = fs._strip_protocol(source)
    destination = fs._strip_protocol(destination)
    allfiles = fs.find(source, withdirs=True, detail=True)
    if not fs.isdir(source):
        raise ValueError("Can only rsync on a directory")
    otherfiles = fs.find(destination, withdirs=True, detail=True)
    dirs = [
        a
        for a, v in allfiles.items()
        if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
    ]
    logger.debug(f"{len(dirs)} directories to create")
    if dirs:
        fs.make_many_dirs(
            [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
        )
    allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
    logger.debug(f"{len(allfiles)} files to consider for copy")
    to_delete = [
        o
        for o, v in otherfiles.items()
        if o.replace(destination, source) not in allfiles and v["type"] == "file"
    ]
    for k, v in allfiles.copy().items():
        otherfile = k.replace(source, destination)
        if otherfile in otherfiles:
            if update_cond == "always":
                allfiles[k] = otherfile
            elif update_cond == "different":
                inf1 = source_field(v) if callable(source_field) else v[source_field]
                v2 = otherfiles[otherfile]
                inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
                if inf1 != inf2:
                    # details mismatch, make copy
                    allfiles[k] = otherfile
                else:
                    # details match, don't copy
                    allfiles.pop(k)
        else:
            # file not in target yet
            allfiles[k] = otherfile
    logger.debug(f"{len(allfiles)} files to copy")
    if allfiles:
        source_files, target_files = zip(*allfiles.items())
        fs.cp(source_files, target_files, **kwargs)
    logger.debug(f"{len(to_delete)} files to delete")
    if delete_missing:
        fs.rm(to_delete)


class GenericFileSystem(AsyncFileSystem):
    """Wrapper over all other FS types

    <experimental!>

    This implementation is a single unified interface to be able to run FS operations
    over generic URLs, and dispatch to the specific implementations using the URL
    protocol prefix.

    Note: instances of this FS are always async, even if you never use it with any async
    backend.
    """

    protocol = "generic"  # there is no real reason to ever use a protocol with this FS

    def __init__(self, default_method="default", **kwargs):
        """

        Parameters
        ----------
        default_method: str (optional)
            Defines how to configure backend FS instances. Options are:
            - "default": instantiate like FSClass(), with no
              extra arguments; this is the default instance of that FS, and can be
              configured via the config system
            - "generic": takes instances from the `_generic_fs` dict in this module,
              which you must populate before use. Keys are by protocol
            - "current": takes the most recently instantiated version of each FS
        """
        self.method = default_method
        super().__init__(**kwargs)

    def _parent(self, path):
        fs = _resolve_fs(path, self.method)
        return fs.unstrip_protocol(fs._parent(path))

    def _strip_protocol(self, path):
        # normalization only
        fs = _resolve_fs(path, self.method)
        return fs.unstrip_protocol(fs._strip_protocol(path))

    async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            out = await fs._find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
            )
        else:
            out = fs.find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
            )
        result = {}
        for k, v in out.items():
            name = fs.unstrip_protocol(k)
            v["name"] = name
            result[name] = v
        if detail:
            return result
        return list(result)

    async def _info(self, url, **kwargs):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            out = await fs._info(url, **kwargs)
        else:
            out = fs.info(url, **kwargs)
        out["name"] = fs.unstrip_protocol(out["name"])
        return out

    async def _ls(
        self,
        url,
        detail=True,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            out = await fs._ls(url, detail=True, **kwargs)
        else:
            out = fs.ls(url, detail=True, **kwargs)
        for o in out:
            o["name"] = fs.unstrip_protocol(o["name"])
        if detail:
            return out
        else:
            return [o["name"] for o in out]

    async def _cat_file(
        self,
        url,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            return await fs._cat_file(url, **kwargs)
        else:
            return fs.cat_file(url, **kwargs)

    async def _pipe_file(
        self,
        path,
        value,
        **kwargs,
    ):
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            return await fs._pipe_file(path, value, **kwargs)
        else:
            return fs.pipe_file(path, value, **kwargs)

    async def _rm(self, url, **kwargs):
        urls = url
        if isinstance(urls, str):
            urls = [urls]
        fs = _resolve_fs(urls[0], self.method)
        if fs.async_impl:
            await fs._rm(urls, **kwargs)
        else:
            fs.rm(url, **kwargs)

    async def _makedirs(self, path, exist_ok=False):
        logger.debug("Make dir %s", path)
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            await fs._makedirs(path, exist_ok=exist_ok)
        else:
            fs.makedirs(path, exist_ok=exist_ok)

    def rsync(self, source, destination, **kwargs):
        """Sync files between two directory trees

        See `func:rsync` for more details.
        """
        rsync(source, destination, fs=self, **kwargs)

    async def _cp_file(
        self,
        url,
        url2,
        blocksize=2**20,
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        fs2 = _resolve_fs(url2, self.method)
        if fs is fs2:
            # pure remote
            if fs.async_impl:
                return await fs._cp_file(url, url2, **kwargs)
            else:
                return fs.cp_file(url, url2, **kwargs)
        kw = {"blocksize": 0, "cache_type": "none"}
        try:
            f1 = (
                await fs.open_async(url, "rb")
                if hasattr(fs, "open_async")
                else fs.open(url, "rb", **kw)
            )
            callback.set_size(await maybe_await(f1.size))
            f2 = (
                await fs2.open_async(url2, "wb")
                if hasattr(fs2, "open_async")
                else fs2.open(url2, "wb", **kw)
            )
            while f1.size is None or f2.tell() < f1.size:
                data = await maybe_await(f1.read(blocksize))
                if f1.size is None and not data:
                    break
                await maybe_await(f2.write(data))
                callback.absolute_update(f2.tell())
        finally:
            try:
                await maybe_await(f2.close())
                await maybe_await(f1.close())
            except NameError:
                # fail while opening f1 or f2
                pass

    async def _make_many_dirs(self, urls, exist_ok=True):
        fs = _resolve_fs(urls[0], self.method)
        if fs.async_impl:
            coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
            await _run_coros_in_chunks(coros)
        else:
            for u in urls:
                fs.makedirs(u, exist_ok=exist_ok)

    make_many_dirs = sync_wrapper(_make_many_dirs)

    async def _copy(
        self,
        path1: list[str],
        path2: list[str],
        recursive: bool = False,
        on_error: str = "ignore",
        maxdepth: Optional[int] = None,
        batch_size: Optional[int] = None,
        tempdir: Optional[str] = None,
        **kwargs,
    ):
        if recursive:
            raise NotImplementedError
        fs = _resolve_fs(path1[0], self.method)
        fs2 = _resolve_fs(path2[0], self.method)
        # not expanding paths atm., assume call is from rsync()
        if fs is fs2:
            # pure remote
            if fs.async_impl:
                return await fs._copy(path1, path2, **kwargs)
            else:
                return fs.copy(path1, path2, **kwargs)
        await copy_file_op(
            fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
        )


async def copy_file_op(
    fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
):
    import tempfile

    tempdir = tempdir or tempfile.mkdtemp()
    try:
        coros = [
            _copy_file_op(
                fs1,
                u1,
                fs2,
                u2,
                os.path.join(tempdir, uuid.uuid4().hex),
                on_error=on_error,
            )
            for u1, u2 in zip(url1, url2)
        ]
        await _run_coros_in_chunks(coros, batch_size=batch_size)
    finally:
        shutil.rmtree(tempdir)


async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
    ex = () if on_error == "raise" else Exception
    logger.debug("Copy %s -> %s", url1, url2)
    try:
        if fs1.async_impl:
            await fs1._get_file(url1, local)
        else:
            fs1.get_file(url1, local)
        if fs2.async_impl:
            await fs2._put_file(local, url2)
        else:
            fs2.put_file(local, url2)
        os.unlink(local)
        logger.debug("Copy %s -> %s; done", url1, url2)
    except ex as e:
        logger.debug("ignoring cp exception for %s: %s", url1, e)


async def maybe_await(cor):
    if inspect.iscoroutine(cor):
        return await cor
    else:
        return cor
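A sketch of ``rsync`` between two local directory trees, which exercises the GenericFileSystem dispatch without needing any remote backend; the paths are hypothetical, and the API is marked experimental above:

from fsspec.generic import GenericFileSystem, rsync

fs = GenericFileSystem()
# mirror /tmp/src into /tmp/dst, copying only files whose size differs
rsync("file:///tmp/src", "file:///tmp/dst", fs=fs, update_cond="different")
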
env-llmeval/lib/python3.10/site-packages/fsspec/gui.py
ADDED
@@ -0,0 +1,414 @@
import ast
import contextlib
import logging
import os
import re
from typing import ClassVar, Sequence

import panel as pn

from .core import OpenFile, get_filesystem_class, split_protocol
from .registry import known_implementations

pn.extension()
logger = logging.getLogger("fsspec.gui")


class SigSlot:
    """Signal-slot mixin, for Panel event passing

    Include this class in a widget manager's superclasses to be able to
    register events and callbacks on Panel widgets managed by that class.

    The method ``_register`` should be called as widgets are added, and external
    code should call ``connect`` to associate callbacks.

    By default, all signals emit a DEBUG logging statement.
    """

    # names of signals that this class may emit each of which must be
    # set by _register for any new instance
    signals: ClassVar[Sequence[str]] = []
    # names of actions that this class may respond to
    slots: ClassVar[Sequence[str]] = []

    # each of which must be a method name

    def __init__(self):
        self._ignoring_events = False
        self._sigs = {}
        self._map = {}
        self._setup()

    def _setup(self):
        """Create GUI elements and register signals"""
        self.panel = pn.pane.PaneBase()
        # no signals to set up in the base class

    def _register(
        self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
    ):
        """Watch the given attribute of a widget and assign it a named event

        This is normally called at the time a widget is instantiated, in the
        class which owns it.

        Parameters
        ----------
        widget : pn.layout.Panel or None
            Widget to watch. If None, an anonymous signal not associated with
            any widget.
        name : str
            Name of this event
        thing : str
            Attribute of the given widget to watch
        log_level : int
            When the signal is triggered, a logging event of the given level
            will be fired in the dfviz logger.
        auto : bool
            If True, automatically connects with a method in this class of the
            same name.
        """
        if name not in self.signals:
            raise ValueError(f"Attempt to assign an undeclared signal: {name}")
        self._sigs[name] = {
            "widget": widget,
            "callbacks": [],
            "thing": thing,
            "log": log_level,
        }
        wn = "-".join(
            [
                getattr(widget, "name", str(widget)) if widget is not None else "none",
                thing,
            ]
        )
        self._map[wn] = name
        if widget is not None:
            widget.param.watch(self._signal, thing, onlychanged=True)
        if auto and hasattr(self, name):
            self.connect(name, getattr(self, name))

    def _repr_mimebundle_(self, *args, **kwargs):
        """Display in a notebook or a server"""
        try:
            return self.panel._repr_mimebundle_(*args, **kwargs)
        except (ValueError, AttributeError):
            raise NotImplementedError("Panel does not seem to be set up properly")

    def connect(self, signal, slot):
        """Associate call back with given event

        The callback must be a function which takes the "new" value of the
        watched attribute as the only parameter. If the callback returns False,
        this cancels any further processing of the given event.

        Alternatively, the callback can be a string, in which case it means
        emitting the correspondingly-named event (i.e., connect to self)
        """
        self._sigs[signal]["callbacks"].append(slot)

    def _signal(self, event):
        """This is called by an action on a widget

        Within a self.ignore_events context, nothing happens.

        Tests can execute this method by directly changing the values of
        widget components.
        """
        if not self._ignoring_events:
            wn = "-".join([event.obj.name, event.name])
            if wn in self._map and self._map[wn] in self._sigs:
                self._emit(self._map[wn], event.new)

    @contextlib.contextmanager
    def ignore_events(self):
        """Temporarily turn off events processing in this instance

        (does not propagate to children)
        """
        self._ignoring_events = True
        try:
            yield
        finally:
            self._ignoring_events = False

    def _emit(self, sig, value=None):
        """An event happened, call its callbacks

        This method can be used in tests to simulate message passing without
        directly changing visual elements.

        Calling of callbacks will halt whenever one returns False.
        """
        logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
        for callback in self._sigs[sig]["callbacks"]:
            if isinstance(callback, str):
                self._emit(callback)
            else:
                try:
                    # running callbacks should not break the interface
                    ret = callback(value)
                    if ret is False:
                        break
                except Exception as e:
                    logger.exception(
                        "Exception (%s) while executing callback for signal: %s",
                        e,
                        sig,
                    )

    def show(self, threads=False):
        """Open a new browser tab and display this instance's interface"""
        self.panel.show(threads=threads, verbose=False)
        return self


class SingleSelect(SigSlot):
    """A multiselect which only allows you to select one item for an event"""

    signals = ["_selected", "selected"]  # the first is internal
    slots = ["set_options", "set_selection", "add", "clear", "select"]

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        super().__init__()

    def _setup(self):
        self.panel = pn.widgets.MultiSelect(**self.kwargs)
        self._register(self.panel, "_selected", "value")
        self._register(None, "selected")
        self.connect("_selected", self.select_one)

    def _signal(self, *args, **kwargs):
        super()._signal(*args, **kwargs)

    def select_one(self, *_):
        with self.ignore_events():
            val = [self.panel.value[-1]] if self.panel.value else []
            self.panel.value = val
        self._emit("selected", self.panel.value)

    def set_options(self, options):
        self.panel.options = options

    def clear(self):
        self.panel.options = []

    @property
    def value(self):
        return self.panel.value

    def set_selection(self, selection):
        self.panel.value = [selection]


class FileSelector(SigSlot):
    """Panel-based graphical file selector widget

    Instances of this widget are interactive and can be displayed in jupyter by having
    them as the output of a cell, or in a separate browser tab using ``.show()``.
    """

    signals = [
        "protocol_changed",
        "selection_changed",
        "directory_entered",
        "home_clicked",
        "up_clicked",
        "go_clicked",
        "filters_changed",
    ]
    slots = ["set_filters", "go_home"]

    def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
        """

        Parameters
        ----------
        url : str (optional)
            Initial value of the URL to populate the dialog; should include protocol
        filters : list(str) (optional)
            File endings to include in the listings. If not included, all files are
            allowed. Does not affect directories.
            If given, the endings will appear as checkboxes in the interface
        ignore : list(str) (optional)
            Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
            hidden files on posix
        kwargs : dict (optional)
            To pass to file system instance
        """
        if url:
            self.init_protocol, url = split_protocol(url)
        else:
            self.init_protocol, url = "file", os.getcwd()
        self.init_url = url
        self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
        self.filters = filters
        self.ignore = [re.compile(i) for i in ignore or []]
        self._fs = None
        super().__init__()

    def _setup(self):
        self.url = pn.widgets.TextInput(
            name="url",
            value=self.init_url,
            align="end",
            sizing_mode="stretch_width",
            width_policy="max",
        )
        self.protocol = pn.widgets.Select(
            options=sorted(known_implementations),
            value=self.init_protocol,
            name="protocol",
            align="center",
        )
        self.kwargs = pn.widgets.TextInput(
            name="kwargs", value=self.init_kwargs, align="center"
        )
        self.go = pn.widgets.Button(name="⇨", align="end", width=45)
        self.main = SingleSelect(size=10)
        self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
        self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")

        self._register(self.protocol, "protocol_changed", auto=True)
        self._register(self.go, "go_clicked", "clicks", auto=True)
        self._register(self.up, "up_clicked", "clicks", auto=True)
        self._register(self.home, "home_clicked", "clicks", auto=True)
        self._register(None, "selection_changed")
        self.main.connect("selected", self.selection_changed)
        self._register(None, "directory_entered")
        self.prev_protocol = self.protocol.value
        self.prev_kwargs = self.storage_options

        self.filter_sel = pn.widgets.CheckBoxGroup(
            value=[], options=[], inline=False, align="end", width_policy="min"
        )
        self._register(self.filter_sel, "filters_changed", auto=True)

        self.panel = pn.Column(
            pn.Row(self.protocol, self.kwargs),
            pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
            self.main.panel,
        )
        self.set_filters(self.filters)
        self.go_clicked()

    def set_filters(self, filters=None):
        self.filters = filters
        if filters:
            self.filter_sel.options = filters
            self.filter_sel.value = filters
        else:
            self.filter_sel.options = []
            self.filter_sel.value = []

    @property
    def storage_options(self):
        """Value of the kwargs box as a dictionary"""
        return ast.literal_eval(self.kwargs.value) or {}

    @property
    def fs(self):
        """Current filesystem instance"""
        if self._fs is None:
            cls = get_filesystem_class(self.protocol.value)
            self._fs = cls(**self.storage_options)
        return self._fs

    @property
    def urlpath(self):
        """URL of currently selected item"""
        return (
            (f"{self.protocol.value}://{self.main.value[0]}")
            if self.main.value
            else None
        )

def open_file(self, mode="rb", compression=None, encoding=None):
|
329 |
+
"""Create OpenFile instance for the currently selected item
|
330 |
+
|
331 |
+
For example, in a notebook you might do something like
|
332 |
+
|
333 |
+
.. code-block::
|
334 |
+
|
335 |
+
[ ]: sel = FileSelector(); sel
|
336 |
+
|
337 |
+
# user selects their file
|
338 |
+
|
339 |
+
[ ]: with sel.open_file('rb') as f:
|
340 |
+
... out = f.read()
|
341 |
+
|
342 |
+
Parameters
|
343 |
+
----------
|
344 |
+
mode: str (optional)
|
345 |
+
Open mode for the file.
|
346 |
+
compression: str (optional)
|
347 |
+
The interact with the file as compressed. Set to 'infer' to guess
|
348 |
+
compression from the file ending
|
349 |
+
encoding: str (optional)
|
350 |
+
If using text mode, use this encoding; defaults to UTF8.
|
351 |
+
"""
|
352 |
+
if self.urlpath is None:
|
353 |
+
raise ValueError("No file selected")
|
354 |
+
return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
|
355 |
+
|
356 |
+
def filters_changed(self, values):
|
357 |
+
self.filters = values
|
358 |
+
self.go_clicked()
|
359 |
+
|
360 |
+
def selection_changed(self, *_):
|
361 |
+
if self.urlpath is None:
|
362 |
+
return
|
363 |
+
if self.fs.isdir(self.urlpath):
|
364 |
+
self.url.value = self.fs._strip_protocol(self.urlpath)
|
365 |
+
self.go_clicked()
|
366 |
+
|
367 |
+
def go_clicked(self, *_):
|
368 |
+
if (
|
369 |
+
self.prev_protocol != self.protocol.value
|
370 |
+
or self.prev_kwargs != self.storage_options
|
371 |
+
):
|
372 |
+
self._fs = None # causes fs to be recreated
|
373 |
+
self.prev_protocol = self.protocol.value
|
374 |
+
self.prev_kwargs = self.storage_options
|
375 |
+
listing = sorted(
|
376 |
+
self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
|
377 |
+
)
|
378 |
+
listing = [
|
379 |
+
l
|
380 |
+
for l in listing
|
381 |
+
if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
|
382 |
+
]
|
383 |
+
folders = {
|
384 |
+
"📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
|
385 |
+
for o in listing
|
386 |
+
if o["type"] == "directory"
|
387 |
+
}
|
388 |
+
files = {
|
389 |
+
"📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
|
390 |
+
for o in listing
|
391 |
+
if o["type"] == "file"
|
392 |
+
}
|
393 |
+
if self.filters:
|
394 |
+
files = {
|
395 |
+
k: v
|
396 |
+
for k, v in files.items()
|
397 |
+
if any(v.endswith(ext) for ext in self.filters)
|
398 |
+
}
|
399 |
+
self.main.set_options(dict(**folders, **files))
|
400 |
+
|
401 |
+
def protocol_changed(self, *_):
|
402 |
+
self._fs = None
|
403 |
+
self.main.options = []
|
404 |
+
self.url.value = ""
|
405 |
+
|
406 |
+
def home_clicked(self, *_):
|
407 |
+
self.protocol.value = self.init_protocol
|
408 |
+
self.kwargs.value = self.init_kwargs
|
409 |
+
self.url.value = self.init_url
|
410 |
+
self.go_clicked()
|
411 |
+
|
412 |
+
def up_clicked(self, *_):
|
413 |
+
self.url.value = self.fs._parent(self.url.value)
|
414 |
+
self.go_clicked()
|
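
A minimal usage sketch for the FileSelector above (not part of the diff): it assumes a Jupyter session with panel installed; the starting URL and the ".csv" filter are illustrative values only.

import os

from fsspec.gui import FileSelector

# Browse the local filesystem starting from the current directory,
# listing only files ending in ".csv" (directories are unaffected)
sel = FileSelector(url="file://" + os.getcwd(), filters=[".csv"])
sel  # as the last expression of a notebook cell this renders the widget;
     # sel.show() would open it in a separate browser tab instead

# Once a file has been picked in the widget:
# with sel.open_file("rb") as f:
#     data = f.read()
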
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/data.py
ADDED
@@ -0,0 +1,48 @@
import base64
import io
from urllib.parse import unquote

from fsspec import AbstractFileSystem


class DataFileSystem(AbstractFileSystem):
    """A handy decoder for data-URLs

    Example
    -------
    >>> with fsspec.open("data:,Hello%2C%20World%21") as f:
    ...     print(f.read())
    b"Hello, World!"

    """

    protocol = "data"

    def __init__(self, **kwargs):
        """No parameters for this filesystem"""
        super().__init__(**kwargs)

    def cat_file(self, path, start=None, end=None, **kwargs):
        pref, data = path.split(",", 1)
        if pref.endswith("base64"):
            return base64.b64decode(data)[start:end]
        return unquote(data).encode()[start:end]

    def info(self, path, **kwargs):
        pref, name = path.split(",", 1)
        data = self.cat_file(path)
        mime = pref.split(":", 1)[1].split(";", 1)[0]
        return {"name": name, "size": len(data), "type": "file", "mimetype": mime}

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        if "r" not in mode:
            raise ValueError("Read only filesystem")
        return io.BytesIO(self.cat_file(path))
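
For context, a short sketch of the data-URL decoding above, extending the docstring's example to the base64 branch; the payload is illustrative.

import base64

import fsspec

payload = base64.b64encode(b"hello fsspec").decode()
url = f"data:text/plain;base64,{payload}"

# cat_file sees the ";base64" suffix before the comma and base64-decodes the body
with fsspec.open(url) as f:
    print(f.read())  # b'hello fsspec'

# info() reports the mimetype parsed from the prefix of the URL
fs = fsspec.filesystem("data")
print(fs.info(url)["mimetype"])  # 'text/plain'
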
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dirfs.py
ADDED
@@ -0,0 +1,364 @@
from .. import filesystem
from ..asyn import AsyncFileSystem


class DirFileSystem(AsyncFileSystem):
    """Directory prefix filesystem

    The DirFileSystem is a filesystem-wrapper. It assumes every path it is dealing with
    is relative to the `path`. After performing the necessary path operations it
    delegates everything to the wrapped filesystem.
    """

    protocol = "dir"

    def __init__(
        self,
        path=None,
        fs=None,
        fo=None,
        target_protocol=None,
        target_options=None,
        **storage_options,
    ):
        """
        Parameters
        ----------
        path: str
            Path to the directory.
        fs: AbstractFileSystem
            An instantiated filesystem to wrap.
        target_protocol, target_options:
            if fs is None, construct it from these
        fo: str
            Alternate for path; do not provide both
        """
        super().__init__(**storage_options)
        if fs is None:
            fs = filesystem(protocol=target_protocol, **(target_options or {}))
        if (path is not None) ^ (fo is not None) is False:
            raise ValueError("Provide path or fo, not both")
        path = path or fo

        if self.asynchronous and not fs.async_impl:
            raise ValueError("can't use asynchronous with non-async fs")

        if fs.async_impl and self.asynchronous != fs.asynchronous:
            raise ValueError("both dirfs and fs should be in the same sync/async mode")

        self.path = fs._strip_protocol(path)
        self.fs = fs

    def _join(self, path):
        if isinstance(path, str):
            if not self.path:
                return path
            if not path:
                return self.path
            return self.fs.sep.join((self.path, self._strip_protocol(path)))
        return [self._join(_path) for _path in path]

    def _relpath(self, path):
        if isinstance(path, str):
            if not self.path:
                return path
            if path == self.path:
                return ""
            prefix = self.path + self.fs.sep
            assert path.startswith(prefix)
            return path[len(prefix) :]
        return [self._relpath(_path) for _path in path]

    # Wrappers below

    @property
    def sep(self):
        return self.fs.sep

    async def set_session(self, *args, **kwargs):
        return await self.fs.set_session(*args, **kwargs)

    async def _rm_file(self, path, **kwargs):
        return await self.fs._rm_file(self._join(path), **kwargs)

    def rm_file(self, path, **kwargs):
        return self.fs.rm_file(self._join(path), **kwargs)

    async def _rm(self, path, *args, **kwargs):
        return await self.fs._rm(self._join(path), *args, **kwargs)

    def rm(self, path, *args, **kwargs):
        return self.fs.rm(self._join(path), *args, **kwargs)

    async def _cp_file(self, path1, path2, **kwargs):
        return await self.fs._cp_file(self._join(path1), self._join(path2), **kwargs)

    def cp_file(self, path1, path2, **kwargs):
        return self.fs.cp_file(self._join(path1), self._join(path2), **kwargs)

    async def _copy(
        self,
        path1,
        path2,
        *args,
        **kwargs,
    ):
        return await self.fs._copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    def copy(self, path1, path2, *args, **kwargs):
        return self.fs.copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    async def _pipe(self, path, *args, **kwargs):
        return await self.fs._pipe(self._join(path), *args, **kwargs)

    def pipe(self, path, *args, **kwargs):
        return self.fs.pipe(self._join(path), *args, **kwargs)

    async def _pipe_file(self, path, *args, **kwargs):
        return await self.fs._pipe_file(self._join(path), *args, **kwargs)

    def pipe_file(self, path, *args, **kwargs):
        return self.fs.pipe_file(self._join(path), *args, **kwargs)

    async def _cat_file(self, path, *args, **kwargs):
        return await self.fs._cat_file(self._join(path), *args, **kwargs)

    def cat_file(self, path, *args, **kwargs):
        return self.fs.cat_file(self._join(path), *args, **kwargs)

    async def _cat(self, path, *args, **kwargs):
        ret = await self.fs._cat(
            self._join(path),
            *args,
            **kwargs,
        )

        if isinstance(ret, dict):
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    def cat(self, path, *args, **kwargs):
        ret = self.fs.cat(
            self._join(path),
            *args,
            **kwargs,
        )

        if isinstance(ret, dict):
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    async def _put_file(self, lpath, rpath, **kwargs):
        return await self.fs._put_file(lpath, self._join(rpath), **kwargs)

    def put_file(self, lpath, rpath, **kwargs):
        return self.fs.put_file(lpath, self._join(rpath), **kwargs)

    async def _put(
        self,
        lpath,
        rpath,
        *args,
        **kwargs,
    ):
        return await self.fs._put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    def put(self, lpath, rpath, *args, **kwargs):
        return self.fs.put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    async def _get_file(self, rpath, lpath, **kwargs):
        return await self.fs._get_file(self._join(rpath), lpath, **kwargs)

    def get_file(self, rpath, lpath, **kwargs):
        return self.fs.get_file(self._join(rpath), lpath, **kwargs)

    async def _get(self, rpath, *args, **kwargs):
        return await self.fs._get(self._join(rpath), *args, **kwargs)

    def get(self, rpath, *args, **kwargs):
        return self.fs.get(self._join(rpath), *args, **kwargs)

    async def _isfile(self, path):
        return await self.fs._isfile(self._join(path))

    def isfile(self, path):
        return self.fs.isfile(self._join(path))

    async def _isdir(self, path):
        return await self.fs._isdir(self._join(path))

    def isdir(self, path):
        return self.fs.isdir(self._join(path))

    async def _size(self, path):
        return await self.fs._size(self._join(path))

    def size(self, path):
        return self.fs.size(self._join(path))

    async def _exists(self, path):
        return await self.fs._exists(self._join(path))

    def exists(self, path):
        return self.fs.exists(self._join(path))

    async def _info(self, path, **kwargs):
        return await self.fs._info(self._join(path), **kwargs)

    def info(self, path, **kwargs):
        return self.fs.info(self._join(path), **kwargs)

    async def _ls(self, path, detail=True, **kwargs):
        ret = (await self.fs._ls(self._join(path), detail=detail, **kwargs)).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    def ls(self, path, detail=True, **kwargs):
        ret = self.fs.ls(self._join(path), detail=detail, **kwargs).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    async def _walk(self, path, *args, **kwargs):
        async for root, dirs, files in self.fs._walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    def walk(self, path, *args, **kwargs):
        for root, dirs, files in self.fs.walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    async def _glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = await self.fs._du(self._join(path), *args, **kwargs)
        if total:
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    def du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = self.fs.du(self._join(path), *args, **kwargs)
        if total:
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    async def _find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _expand_path(self, path, *args, **kwargs):
        return self._relpath(
            await self.fs._expand_path(self._join(path), *args, **kwargs)
        )

    def expand_path(self, path, *args, **kwargs):
        return self._relpath(self.fs.expand_path(self._join(path), *args, **kwargs))

    async def _mkdir(self, path, *args, **kwargs):
        return await self.fs._mkdir(self._join(path), *args, **kwargs)

    def mkdir(self, path, *args, **kwargs):
        return self.fs.mkdir(self._join(path), *args, **kwargs)

    async def _makedirs(self, path, *args, **kwargs):
        return await self.fs._makedirs(self._join(path), *args, **kwargs)

    def makedirs(self, path, *args, **kwargs):
        return self.fs.makedirs(self._join(path), *args, **kwargs)

    def rmdir(self, path):
        return self.fs.rmdir(self._join(path))

    def mv_file(self, path1, path2, **kwargs):
        return self.fs.mv_file(
            self._join(path1),
            self._join(path2),
            **kwargs,
        )

    def touch(self, path, **kwargs):
        return self.fs.touch(self._join(path), **kwargs)

    def created(self, path):
        return self.fs.created(self._join(path))

    def modified(self, path):
        return self.fs.modified(self._join(path))

    def sign(self, path, *args, **kwargs):
        return self.fs.sign(self._join(path), *args, **kwargs)

    def __repr__(self):
        return f"{self.__class__.__qualname__}(path='{self.path}', fs={self.fs})"

    def open(
        self,
        path,
        *args,
        **kwargs,
    ):
        return self.fs.open(
            self._join(path),
            *args,
            **kwargs,
        )
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/ftp.py
ADDED
@@ -0,0 +1,385 @@
import os
import sys
import uuid
import warnings
from ftplib import FTP, Error, error_perm
from typing import Any

from ..spec import AbstractBufferedFile, AbstractFileSystem
from ..utils import infer_storage_options, isfilelike


class FTPFileSystem(AbstractFileSystem):
    """A filesystem over classic FTP"""

    root_marker = "/"
    cachable = False
    protocol = "ftp"

    def __init__(
        self,
        host,
        port=21,
        username=None,
        password=None,
        acct=None,
        block_size=None,
        tempdir=None,
        timeout=30,
        encoding="utf-8",
        **kwargs,
    ):
        """
        You can use _get_kwargs_from_urls to get some kwargs from
        a reasonable FTP url.

        Authentication will be anonymous if username/password are not
        given.

        Parameters
        ----------
        host: str
            The remote server name/ip to connect to
        port: int
            Port to connect with
        username: str or None
            If authenticating, the user's identifier
        password: str or None
            User's password on the server, if using
        acct: str or None
            Some servers also need an "account" string for auth
        block_size: int or None
            If given, the read-ahead or write buffer size.
        tempdir: str
            Directory on remote to put temporary files when in a transaction
        timeout: int
            Timeout of the ftp connection in seconds
        encoding: str
            Encoding to use for directories and filenames in FTP connection
        """
        super().__init__(**kwargs)
        self.host = host
        self.port = port
        self.tempdir = tempdir or "/tmp"
        self.cred = username, password, acct
        self.timeout = timeout
        self.encoding = encoding
        if block_size is not None:
            self.blocksize = block_size
        else:
            self.blocksize = 2**16
        self._connect()

    def _connect(self):
        if sys.version_info >= (3, 9):
            self.ftp = FTP(timeout=self.timeout, encoding=self.encoding)
        elif self.encoding:
            warnings.warn("`encoding` not supported for python<3.9, ignoring")
            self.ftp = FTP(timeout=self.timeout)
        else:
            self.ftp = FTP(timeout=self.timeout)
        self.ftp.connect(self.host, self.port)
        self.ftp.login(*self.cred)

    @classmethod
    def _strip_protocol(cls, path):
        return "/" + infer_storage_options(path)["path"].lstrip("/").rstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(urlpath):
        out = infer_storage_options(urlpath)
        out.pop("path", None)
        out.pop("protocol", None)
        return out

    def ls(self, path, detail=True, **kwargs):
        path = self._strip_protocol(path)
        out = []
        if path not in self.dircache:
            try:
                try:
                    out = [
                        (fn, details)
                        for (fn, details) in self.ftp.mlsd(path)
                        if fn not in [".", ".."]
                        and details["type"] not in ["pdir", "cdir"]
                    ]
                except error_perm:
                    out = _mlsd2(self.ftp, path)  # Not platform independent
                for fn, details in out:
                    if path == "/":
                        path = ""  # just for forming the names, below
                    details["name"] = "/".join([path, fn.lstrip("/")])
                    if details["type"] == "file":
                        details["size"] = int(details["size"])
                    else:
                        details["size"] = 0
                    if details["type"] == "dir":
                        details["type"] = "directory"
                self.dircache[path] = out
            except Error:
                try:
                    info = self.info(path)
                    if info["type"] == "file":
                        out = [(path, info)]
                except (Error, IndexError):
                    raise FileNotFoundError(path)
        files = self.dircache.get(path, out)
        if not detail:
            return sorted([fn for fn, details in files])
        return [details for fn, details in files]

    def info(self, path, **kwargs):
        # implement with direct method
        path = self._strip_protocol(path)
        if path == "/":
            # special case, since this dir has no real entry
            return {"name": "/", "size": 0, "type": "directory"}
        files = self.ls(self._parent(path).lstrip("/"), True)
        try:
            out = [f for f in files if f["name"] == path][0]
        except IndexError:
            raise FileNotFoundError(path)
        return out

    def get_file(self, rpath, lpath, **kwargs):
        if self.isdir(rpath):
            if not os.path.exists(lpath):
                os.mkdir(lpath)
            return
        if isfilelike(lpath):
            outfile = lpath
        else:
            outfile = open(lpath, "wb")

        def cb(x):
            outfile.write(x)

        self.ftp.retrbinary(
            f"RETR {rpath}",
            blocksize=self.blocksize,
            callback=cb,
        )
        if not isfilelike(lpath):
            outfile.close()

    def cat_file(self, path, start=None, end=None, **kwargs):
        if end is not None:
            return super().cat_file(path, start, end, **kwargs)
        out = []

        def cb(x):
            out.append(x)

        try:
            self.ftp.retrbinary(
                f"RETR {path}",
                blocksize=self.blocksize,
                rest=start,
                callback=cb,
            )
        except (Error, error_perm) as orig_exc:
            raise FileNotFoundError(path) from orig_exc
        return b"".join(out)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        cache_options=None,
        autocommit=True,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        block_size = block_size or self.blocksize
        return FTPFile(
            self,
            path,
            mode=mode,
            block_size=block_size,
            tempdir=self.tempdir,
            autocommit=autocommit,
            cache_options=cache_options,
        )

    def _rm(self, path):
        path = self._strip_protocol(path)
        self.ftp.delete(path)
        self.invalidate_cache(self._parent(path))

    def rm(self, path, recursive=False, maxdepth=None):
        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        for p in reversed(paths):
            if self.isfile(p):
                self.rm_file(p)
            else:
                self.rmdir(p)

    def mkdir(self, path: str, create_parents: bool = True, **kwargs: Any) -> None:
        path = self._strip_protocol(path)
        parent = self._parent(path)
        if parent != self.root_marker and not self.exists(parent) and create_parents:
            self.mkdir(parent, create_parents=create_parents)

        self.ftp.mkd(path)
        self.invalidate_cache(self._parent(path))

    def makedirs(self, path: str, exist_ok: bool = False) -> None:
        path = self._strip_protocol(path)
        if self.exists(path):
            # NB: "/" does not "exist" as it has no directory entry
            if not exist_ok:
                raise FileExistsError(f"{path} exists without `exist_ok`")
            # exist_ok=True -> no-op
        else:
            self.mkdir(path, create_parents=True)

    def rmdir(self, path):
        path = self._strip_protocol(path)
        self.ftp.rmd(path)
        self.invalidate_cache(self._parent(path))

    def mv(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1)
        path2 = self._strip_protocol(path2)
        self.ftp.rename(path1, path2)
        self.invalidate_cache(self._parent(path1))
        self.invalidate_cache(self._parent(path2))

    def __del__(self):
        self.ftp.close()

    def invalidate_cache(self, path=None):
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)


class TransferDone(Exception):
    """Internal exception to break out of transfer"""

    pass


class FTPFile(AbstractBufferedFile):
    """Interact with a remote FTP file with read/write buffering"""

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )
        if not autocommit:
            self.target = self.path
            self.path = "/".join([kwargs["tempdir"], str(uuid.uuid4())])

    def commit(self):
        self.fs.mv(self.path, self.target)

    def discard(self):
        self.fs.rm(self.path)

    def _fetch_range(self, start, end):
        """Get bytes between given byte limits

        Implemented by raising an exception in the fetch callback when the
        number of bytes received reaches the requested amount.

        Will fail if the server does not respect the REST command on
        retrieve requests.
        """
        out = []
        total = [0]

        def callback(x):
            total[0] += len(x)
            if total[0] > end - start:
                out.append(x[: (end - start) - total[0]])
                if end < self.size:
                    raise TransferDone
            else:
                out.append(x)

            if total[0] == end - start and end < self.size:
                raise TransferDone

        try:
            self.fs.ftp.retrbinary(
                f"RETR {self.path}",
                blocksize=self.blocksize,
                rest=start,
                callback=callback,
            )
        except TransferDone:
            try:
                # stop transfer, we got enough bytes for this block
                self.fs.ftp.abort()
                self.fs.ftp.getmultiline()
            except Error:
                self.fs._connect()

        return b"".join(out)

    def _upload_chunk(self, final=False):
        self.buffer.seek(0)
        self.fs.ftp.storbinary(
            f"STOR {self.path}", self.buffer, blocksize=self.blocksize, rest=self.offset
        )
        return True


def _mlsd2(ftp, path="."):
    """
    Fall back to using `dir` instead of `mlsd` if not supported.

    This parses a Linux style `ls -l` response to `dir`, but the response may
    be platform dependent.

    Parameters
    ----------
    ftp: ftplib.FTP
    path: str
        Expects to be given a path, but defaults to ".".
    """
    lines = []
    minfo = []
    ftp.dir(path, lines.append)
    for line in lines:
        split_line = line.split()
        if len(split_line) < 9:
            continue
        this = (
            split_line[-1],
            {
                "modify": " ".join(split_line[5:8]),
                "unix.owner": split_line[2],
                "unix.group": split_line[3],
                "unix.mode": split_line[0],
                "size": split_line[4],
            },
        )
        if "d" == this[1]["unix.mode"][0]:
            this[1]["type"] = "dir"
        else:
            this[1]["type"] = "file"
        minfo.append(this)
    return minfo
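
A hedged usage sketch for the FTP implementation; the host and remote paths below are placeholders for a reachable anonymous server, not real endpoints.

import fsspec

# Anonymous login: username/password default to None
fs = fsspec.filesystem("ftp", host="ftp.example.com")
print(fs.ls("/", detail=False))

# Reads go through FTPFile, which buffers with "readahead" caching by default
with fs.open("/pub/readme.txt", "rb") as f:
    head = f.read(1024)
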
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/http.py
ADDED
@@ -0,0 +1,868 @@
1 |
+
import asyncio
|
2 |
+
import io
|
3 |
+
import logging
|
4 |
+
import re
|
5 |
+
import weakref
|
6 |
+
from copy import copy
|
7 |
+
from urllib.parse import urlparse
|
8 |
+
|
9 |
+
import aiohttp
|
10 |
+
import yarl
|
11 |
+
|
12 |
+
from fsspec.asyn import AbstractAsyncStreamedFile, AsyncFileSystem, sync, sync_wrapper
|
13 |
+
from fsspec.callbacks import DEFAULT_CALLBACK
|
14 |
+
from fsspec.exceptions import FSTimeoutError
|
15 |
+
from fsspec.spec import AbstractBufferedFile
|
16 |
+
from fsspec.utils import (
|
17 |
+
DEFAULT_BLOCK_SIZE,
|
18 |
+
glob_translate,
|
19 |
+
isfilelike,
|
20 |
+
nullcontext,
|
21 |
+
tokenize,
|
22 |
+
)
|
23 |
+
|
24 |
+
from ..caching import AllBytes
|
25 |
+
|
26 |
+
# https://stackoverflow.com/a/15926317/3821154
|
27 |
+
ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
|
28 |
+
ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
|
29 |
+
logger = logging.getLogger("fsspec.http")
|
30 |
+
|
31 |
+
|
32 |
+
async def get_client(**kwargs):
|
33 |
+
return aiohttp.ClientSession(**kwargs)
|
34 |
+
|
35 |
+
|
36 |
+
class HTTPFileSystem(AsyncFileSystem):
|
37 |
+
"""
|
38 |
+
Simple File-System for fetching data via HTTP(S)
|
39 |
+
|
40 |
+
``ls()`` is implemented by loading the parent page and doing a regex
|
41 |
+
match on the result. If simple_link=True, anything of the form
|
42 |
+
"http(s)://server.com/stuff?thing=other"; otherwise only links within
|
43 |
+
HTML href tags will be used.
|
44 |
+
"""
|
45 |
+
|
46 |
+
sep = "/"
|
47 |
+
|
48 |
+
def __init__(
|
49 |
+
self,
|
50 |
+
simple_links=True,
|
51 |
+
block_size=None,
|
52 |
+
same_scheme=True,
|
53 |
+
size_policy=None,
|
54 |
+
cache_type="bytes",
|
55 |
+
cache_options=None,
|
56 |
+
asynchronous=False,
|
57 |
+
loop=None,
|
58 |
+
client_kwargs=None,
|
59 |
+
get_client=get_client,
|
60 |
+
encoded=False,
|
61 |
+
**storage_options,
|
62 |
+
):
|
63 |
+
"""
|
64 |
+
NB: if this is called async, you must await set_client
|
65 |
+
|
66 |
+
Parameters
|
67 |
+
----------
|
68 |
+
block_size: int
|
69 |
+
Blocks to read bytes; if 0, will default to raw requests file-like
|
70 |
+
objects instead of HTTPFile instances
|
71 |
+
simple_links: bool
|
72 |
+
If True, will consider both HTML <a> tags and anything that looks
|
73 |
+
like a URL; if False, will consider only the former.
|
74 |
+
same_scheme: True
|
75 |
+
When doing ls/glob, if this is True, only consider paths that have
|
76 |
+
http/https matching the input URLs.
|
77 |
+
size_policy: this argument is deprecated
|
78 |
+
client_kwargs: dict
|
79 |
+
Passed to aiohttp.ClientSession, see
|
80 |
+
https://docs.aiohttp.org/en/stable/client_reference.html
|
81 |
+
For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
|
82 |
+
get_client: Callable[..., aiohttp.ClientSession]
|
83 |
+
A callable which takes keyword arguments and constructs
|
84 |
+
an aiohttp.ClientSession. It's state will be managed by
|
85 |
+
the HTTPFileSystem class.
|
86 |
+
storage_options: key-value
|
87 |
+
Any other parameters passed on to requests
|
88 |
+
cache_type, cache_options: defaults used in open
|
89 |
+
"""
|
90 |
+
super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
|
91 |
+
self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
|
92 |
+
self.simple_links = simple_links
|
93 |
+
self.same_schema = same_scheme
|
94 |
+
self.cache_type = cache_type
|
95 |
+
self.cache_options = cache_options
|
96 |
+
self.client_kwargs = client_kwargs or {}
|
97 |
+
self.get_client = get_client
|
98 |
+
self.encoded = encoded
|
99 |
+
self.kwargs = storage_options
|
100 |
+
self._session = None
|
101 |
+
|
102 |
+
# Clean caching-related parameters from `storage_options`
|
103 |
+
# before propagating them as `request_options` through `self.kwargs`.
|
104 |
+
# TODO: Maybe rename `self.kwargs` to `self.request_options` to make
|
105 |
+
# it clearer.
|
106 |
+
request_options = copy(storage_options)
|
107 |
+
self.use_listings_cache = request_options.pop("use_listings_cache", False)
|
108 |
+
request_options.pop("listings_expiry_time", None)
|
109 |
+
request_options.pop("max_paths", None)
|
110 |
+
request_options.pop("skip_instance_cache", None)
|
111 |
+
self.kwargs = request_options
|
112 |
+
|
113 |
+
@property
|
114 |
+
def fsid(self):
|
115 |
+
return "http"
|
116 |
+
|
117 |
+
def encode_url(self, url):
|
118 |
+
return yarl.URL(url, encoded=self.encoded)
|
119 |
+
|
120 |
+
@staticmethod
|
121 |
+
def close_session(loop, session):
|
122 |
+
if loop is not None and loop.is_running():
|
123 |
+
try:
|
124 |
+
sync(loop, session.close, timeout=0.1)
|
125 |
+
return
|
126 |
+
except (TimeoutError, FSTimeoutError, NotImplementedError):
|
127 |
+
pass
|
128 |
+
connector = getattr(session, "_connector", None)
|
129 |
+
if connector is not None:
|
130 |
+
# close after loop is dead
|
131 |
+
connector._close()
|
132 |
+
|
133 |
+
async def set_session(self):
|
134 |
+
if self._session is None:
|
135 |
+
self._session = await self.get_client(loop=self.loop, **self.client_kwargs)
|
136 |
+
if not self.asynchronous:
|
137 |
+
weakref.finalize(self, self.close_session, self.loop, self._session)
|
138 |
+
return self._session
|
139 |
+
|
140 |
+
@classmethod
|
141 |
+
def _strip_protocol(cls, path):
|
142 |
+
"""For HTTP, we always want to keep the full URL"""
|
143 |
+
return path
|
144 |
+
|
145 |
+
@classmethod
|
146 |
+
def _parent(cls, path):
|
147 |
+
# override, since _strip_protocol is different for URLs
|
148 |
+
par = super()._parent(path)
|
149 |
+
if len(par) > 7: # "http://..."
|
150 |
+
return par
|
151 |
+
return ""
|
152 |
+
|
153 |
+
async def _ls_real(self, url, detail=True, **kwargs):
|
154 |
+
# ignoring URL-encoded arguments
|
155 |
+
kw = self.kwargs.copy()
|
156 |
+
kw.update(kwargs)
|
157 |
+
logger.debug(url)
|
158 |
+
session = await self.set_session()
|
159 |
+
async with session.get(self.encode_url(url), **self.kwargs) as r:
|
160 |
+
self._raise_not_found_for_status(r, url)
|
161 |
+
text = await r.text()
|
162 |
+
if self.simple_links:
|
163 |
+
links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
|
164 |
+
else:
|
165 |
+
links = [u[2] for u in ex.findall(text)]
|
166 |
+
out = set()
|
167 |
+
parts = urlparse(url)
|
168 |
+
for l in links:
|
169 |
+
if isinstance(l, tuple):
|
170 |
+
l = l[1]
|
171 |
+
if l.startswith("/") and len(l) > 1:
|
172 |
+
# absolute URL on this server
|
173 |
+
l = f"{parts.scheme}://{parts.netloc}{l}"
|
174 |
+
if l.startswith("http"):
|
175 |
+
if self.same_schema and l.startswith(url.rstrip("/") + "/"):
|
176 |
+
out.add(l)
|
177 |
+
elif l.replace("https", "http").startswith(
|
178 |
+
url.replace("https", "http").rstrip("/") + "/"
|
179 |
+
):
|
180 |
+
# allowed to cross http <-> https
|
181 |
+
out.add(l)
|
182 |
+
else:
|
183 |
+
if l not in ["..", "../"]:
|
184 |
+
# Ignore FTP-like "parent"
|
185 |
+
out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
|
186 |
+
if not out and url.endswith("/"):
|
187 |
+
out = await self._ls_real(url.rstrip("/"), detail=False)
|
188 |
+
if detail:
|
189 |
+
return [
|
190 |
+
{
|
191 |
+
"name": u,
|
192 |
+
"size": None,
|
193 |
+
"type": "directory" if u.endswith("/") else "file",
|
194 |
+
}
|
195 |
+
for u in out
|
196 |
+
]
|
197 |
+
else:
|
198 |
+
return sorted(out)
|
199 |
+
|
200 |
+
async def _ls(self, url, detail=True, **kwargs):
|
201 |
+
if self.use_listings_cache and url in self.dircache:
|
202 |
+
out = self.dircache[url]
|
203 |
+
else:
|
204 |
+
out = await self._ls_real(url, detail=detail, **kwargs)
|
205 |
+
self.dircache[url] = out
|
206 |
+
return out
|
207 |
+
|
208 |
+
ls = sync_wrapper(_ls)
|
209 |
+
|
210 |
+
def _raise_not_found_for_status(self, response, url):
|
211 |
+
"""
|
212 |
+
Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
|
213 |
+
"""
|
214 |
+
if response.status == 404:
|
215 |
+
raise FileNotFoundError(url)
|
216 |
+
response.raise_for_status()
|
217 |
+
|
218 |
+
async def _cat_file(self, url, start=None, end=None, **kwargs):
|
219 |
+
kw = self.kwargs.copy()
|
220 |
+
kw.update(kwargs)
|
221 |
+
logger.debug(url)
|
222 |
+
|
223 |
+
if start is not None or end is not None:
|
224 |
+
if start == end:
|
225 |
+
return b""
|
226 |
+
headers = kw.pop("headers", {}).copy()
|
227 |
+
|
228 |
+
headers["Range"] = await self._process_limits(url, start, end)
|
229 |
+
kw["headers"] = headers
|
230 |
+
session = await self.set_session()
|
231 |
+
async with session.get(self.encode_url(url), **kw) as r:
|
232 |
+
out = await r.read()
|
233 |
+
self._raise_not_found_for_status(r, url)
|
234 |
+
return out
|
235 |
+
|
236 |
+
async def _get_file(
|
237 |
+
self, rpath, lpath, chunk_size=5 * 2**20, callback=DEFAULT_CALLBACK, **kwargs
|
238 |
+
):
|
239 |
+
kw = self.kwargs.copy()
|
240 |
+
kw.update(kwargs)
|
241 |
+
logger.debug(rpath)
|
242 |
+
session = await self.set_session()
|
243 |
+
async with session.get(self.encode_url(rpath), **kw) as r:
|
244 |
+
try:
|
245 |
+
size = int(r.headers["content-length"])
|
246 |
+
except (ValueError, KeyError):
|
247 |
+
size = None
|
248 |
+
|
249 |
+
callback.set_size(size)
|
250 |
+
self._raise_not_found_for_status(r, rpath)
|
251 |
+
if isfilelike(lpath):
|
252 |
+
outfile = lpath
|
253 |
+
else:
|
254 |
+
outfile = open(lpath, "wb") # noqa: ASYNC101
|
255 |
+
|
256 |
+
try:
|
257 |
+
chunk = True
|
258 |
+
while chunk:
|
259 |
+
chunk = await r.content.read(chunk_size)
|
260 |
+
outfile.write(chunk)
|
261 |
+
callback.relative_update(len(chunk))
|
262 |
+
finally:
|
263 |
+
if not isfilelike(lpath):
|
264 |
+
outfile.close()
|
265 |
+
|
266 |
+
async def _put_file(
|
267 |
+
self,
|
268 |
+
lpath,
|
269 |
+
rpath,
|
270 |
+
chunk_size=5 * 2**20,
|
271 |
+
callback=DEFAULT_CALLBACK,
|
272 |
+
method="post",
|
273 |
+
**kwargs,
|
274 |
+
):
|
275 |
+
async def gen_chunks():
|
276 |
+
# Support passing arbitrary file-like objects
|
277 |
+
# and use them instead of streams.
|
278 |
+
if isinstance(lpath, io.IOBase):
|
279 |
+
context = nullcontext(lpath)
|
280 |
+
use_seek = False # might not support seeking
|
281 |
+
else:
|
282 |
+
context = open(lpath, "rb") # noqa: ASYNC101
|
283 |
+
use_seek = True
|
284 |
+
|
285 |
+
with context as f:
|
286 |
+
if use_seek:
|
287 |
+
callback.set_size(f.seek(0, 2))
|
288 |
+
f.seek(0)
|
289 |
+
else:
|
290 |
+
callback.set_size(getattr(f, "size", None))
|
291 |
+
|
292 |
+
chunk = f.read(chunk_size)
|
293 |
+
while chunk:
|
294 |
+
yield chunk
|
295 |
+
callback.relative_update(len(chunk))
|
296 |
+
chunk = f.read(chunk_size)
|
297 |
+
|
298 |
+
kw = self.kwargs.copy()
|
299 |
+
kw.update(kwargs)
|
300 |
+
session = await self.set_session()
|
301 |
+
|
302 |
+
method = method.lower()
|
303 |
+
if method not in ("post", "put"):
|
304 |
+
raise ValueError(
|
305 |
+
f"method has to be either 'post' or 'put', not: {method!r}"
|
306 |
+
)
|
307 |
+
|
308 |
+
meth = getattr(session, method)
|
309 |
+
async with meth(self.encode_url(rpath), data=gen_chunks(), **kw) as resp:
|
310 |
+
self._raise_not_found_for_status(resp, rpath)
|
311 |
+
|
312 |
+
async def _exists(self, path, **kwargs):
|
313 |
+
kw = self.kwargs.copy()
|
314 |
+
kw.update(kwargs)
|
315 |
+
try:
|
316 |
+
logger.debug(path)
|
317 |
+
session = await self.set_session()
|
318 |
+
r = await session.get(self.encode_url(path), **kw)
|
319 |
+
async with r:
|
320 |
+
return r.status < 400
|
321 |
+
except aiohttp.ClientError:
|
322 |
+
return False
|
323 |
+
|
324 |
+
async def _isfile(self, path, **kwargs):
|
325 |
+
return await self._exists(path, **kwargs)
|
326 |
+
|
327 |
+
def _open(
|
328 |
+
self,
|
329 |
+
path,
|
330 |
+
mode="rb",
|
331 |
+
block_size=None,
|
332 |
+
autocommit=None, # XXX: This differs from the base class.
|
333 |
+
cache_type=None,
|
334 |
+
cache_options=None,
|
335 |
+
size=None,
|
336 |
+
**kwargs,
|
337 |
+
):
|
338 |
+
"""Make a file-like object
|
339 |
+
|
340 |
+
Parameters
|
341 |
+
----------
|
342 |
+
path: str
|
343 |
+
Full URL with protocol
|
344 |
+
mode: string
|
345 |
+
must be "rb"
|
346 |
+
block_size: int or None
|
347 |
+
Bytes to download in one request; use instance value if None. If
|
348 |
+
zero, will return a streaming Requests file-like instance.
|
349 |
+
kwargs: key-value
|
350 |
+
Any other parameters, passed to requests calls
|
351 |
+
"""
|
352 |
+
if mode != "rb":
|
353 |
+
raise NotImplementedError
|
354 |
+
block_size = block_size if block_size is not None else self.block_size
|
355 |
+
kw = self.kwargs.copy()
|
356 |
+
kw["asynchronous"] = self.asynchronous
|
357 |
+
kw.update(kwargs)
|
358 |
+
size = size or self.info(path, **kwargs)["size"]
|
359 |
+
session = sync(self.loop, self.set_session)
|
360 |
+
if block_size and size:
|
361 |
+
return HTTPFile(
|
362 |
+
self,
|
363 |
+
path,
|
364 |
+
session=session,
|
365 |
+
block_size=block_size,
|
366 |
+
mode=mode,
|
367 |
+
size=size,
|
368 |
+
cache_type=cache_type or self.cache_type,
|
369 |
+
cache_options=cache_options or self.cache_options,
|
370 |
+
loop=self.loop,
|
371 |
+
**kw,
|
372 |
+
)
|
373 |
+
else:
|
374 |
+
return HTTPStreamFile(
|
375 |
+
self,
|
376 |
+
path,
|
377 |
+
mode=mode,
|
378 |
+
loop=self.loop,
|
379 |
+
session=session,
|
380 |
+
**kw,
|
381 |
+
)
|
382 |
+
|
383 |
+
async def open_async(self, path, mode="rb", size=None, **kwargs):
|
384 |
+
session = await self.set_session()
|
385 |
+
if size is None:
|
386 |
+
try:
|
387 |
+
size = (await self._info(path, **kwargs))["size"]
|
388 |
+
except FileNotFoundError:
|
389 |
+
pass
|
390 |
+
return AsyncStreamFile(
|
391 |
+
self,
|
392 |
+
path,
|
393 |
+
loop=self.loop,
|
394 |
+
session=session,
|
395 |
+
size=size,
|
396 |
+
**kwargs,
|
397 |
+
)
|
398 |
+
|
399 |
+
def ukey(self, url):
|
400 |
+
"""Unique identifier; assume HTTP files are static, unchanging"""
|
401 |
+
return tokenize(url, self.kwargs, self.protocol)
|
402 |
+
|
403 |
+
async def _info(self, url, **kwargs):
|
404 |
+
"""Get info of URL
|
405 |
+
|
406 |
+
Tries to access location via HEAD, and then GET methods, but does
|
407 |
+
not fetch the data.
|
408 |
+
|
409 |
+
It is possible that the server does not supply any size information, in
|
410 |
+
which case size will be given as None (and certain operations on the
|
411 |
+
corresponding file will not work).
|
412 |
+
"""
|
413 |
+
info = {}
|
414 |
+
session = await self.set_session()
|
415 |
+
|
416 |
+
for policy in ["head", "get"]:
|
417 |
+
try:
|
418 |
+
info.update(
|
419 |
+
await _file_info(
|
420 |
+
self.encode_url(url),
|
421 |
+
size_policy=policy,
|
422 |
+
session=session,
|
423 |
+
**self.kwargs,
|
424 |
+
**kwargs,
|
425 |
+
)
|
426 |
+
)
|
427 |
+
if info.get("size") is not None:
|
428 |
+
break
|
429 |
+
except Exception as exc:
|
430 |
+
if policy == "get":
|
431 |
+
# If get failed, then raise a FileNotFoundError
|
432 |
+
raise FileNotFoundError(url) from exc
|
433 |
+
logger.debug(str(exc))
|
434 |
+
|
435 |
+
return {"name": url, "size": None, **info, "type": "file"}
|
436 |
+
|
437 |
+
async def _glob(self, path, maxdepth=None, **kwargs):
|
438 |
+
"""
|
439 |
+
Find files by glob-matching.
|
440 |
+
|
441 |
+
This implementation is idntical to the one in AbstractFileSystem,
|
442 |
+
but "?" is not considered as a character for globbing, because it is
|
443 |
+
so common in URLs, often identifying the "query" part.
|
444 |
+
"""
|
445 |
+
if maxdepth is not None and maxdepth < 1:
|
446 |
+
raise ValueError("maxdepth must be at least 1")
|
447 |
+
import re
|
448 |
+
|
449 |
+
ends_with_slash = path.endswith("/") # _strip_protocol strips trailing slash
|
450 |
+
path = self._strip_protocol(path)
|
451 |
+
append_slash_to_dirname = ends_with_slash or path.endswith("/**")
|
452 |
+
idx_star = path.find("*") if path.find("*") >= 0 else len(path)
|
453 |
+
idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
|
454 |
+
|
455 |
+
min_idx = min(idx_star, idx_brace)
|
456 |
+
|
457 |
+
detail = kwargs.pop("detail", False)
|
458 |
+
|
459 |
+
if not has_magic(path):
|
460 |
+
if await self._exists(path, **kwargs):
|
461 |
+
if not detail:
|
462 |
+
return [path]
|
463 |
+
else:
|
464 |
+
return {path: await self._info(path, **kwargs)}
|
465 |
+
else:
|
466 |
+
if not detail:
|
467 |
+
return [] # glob of non-existent returns empty
|
468 |
+
else:
|
469 |
+
return {}
|
470 |
+
elif "/" in path[:min_idx]:
|
471 |
+
min_idx = path[:min_idx].rindex("/")
|
472 |
+
root = path[: min_idx + 1]
|
473 |
+
depth = path[min_idx + 1 :].count("/") + 1
|
474 |
+
else:
|
475 |
+
root = ""
|
476 |
+
depth = path[min_idx + 1 :].count("/") + 1
|
477 |
+
|
478 |
+
if "**" in path:
|
479 |
+
if maxdepth is not None:
|
480 |
+
idx_double_stars = path.find("**")
|
481 |
+
depth_double_stars = path[idx_double_stars:].count("/") + 1
|
482 |
+
depth = depth - depth_double_stars + maxdepth
|
483 |
+
else:
|
484 |
+
depth = None
|
485 |
+
|
486 |
+
allpaths = await self._find(
|
487 |
+
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
|
488 |
+
)
|
489 |
+
|
490 |
+
pattern = glob_translate(path + ("/" if ends_with_slash else ""))
|
491 |
+
pattern = re.compile(pattern)
|
492 |
+
|
493 |
+
out = {
|
494 |
+
p: info
|
495 |
+
for p, info in sorted(allpaths.items())
|
496 |
+
if pattern.match(
|
497 |
+
(
|
498 |
+
p + "/"
|
499 |
+
if append_slash_to_dirname and info["type"] == "directory"
|
500 |
+
else p
|
501 |
+
)
|
502 |
+
)
|
503 |
+
}
|
504 |
+
|
505 |
+
if detail:
|
506 |
+
return out
|
507 |
+
else:
|
508 |
+
return list(out)
|
509 |
+
|
510 |
+
async def _isdir(self, path):
|
511 |
+
# override, since all URLs are (also) files
|
512 |
+
try:
|
513 |
+
return bool(await self._ls(path))
|
514 |
+
except (FileNotFoundError, ValueError):
|
515 |
+
return False
|
516 |
+
|
517 |
+
|
518 |
+
class HTTPFile(AbstractBufferedFile):
|
519 |
+
"""
|
520 |
+
A file-like object pointing to a remove HTTP(S) resource
|
521 |
+
|
522 |
+
Supports only reading, with read-ahead of a predermined block-size.
|
523 |
+
|
524 |
+
In the case that the server does not supply the filesize, only reading of
|
525 |
+
the complete file in one go is supported.
|
526 |
+
|
527 |
+
Parameters
|
528 |
+
----------
|
529 |
+
url: str
|
530 |
+
Full URL of the remote resource, including the protocol
|
531 |
+
session: aiohttp.ClientSession or None
|
532 |
+
All calls will be made within this session, to avoid restarting
|
533 |
+
connections where the server allows this
|
534 |
+
block_size: int or None
|
535 |
+
The amount of read-ahead to do, in bytes. Default is 5MB, or the value
|
536 |
+
configured for the FileSystem creating this file
|
537 |
+
size: None or int
|
538 |
+
If given, this is the size of the file in bytes, and we don't attempt
|
539 |
+
to call the server to find the value.
|
540 |
+
kwargs: all other key-values are passed to requests calls.
|
541 |
+
"""
|
542 |
+
|
543 |
+
    def __init__(
        self,
        fs,
        url,
        session=None,
        block_size=None,
        mode="rb",
        cache_type="bytes",
        cache_options=None,
        size=None,
        loop=None,
        asynchronous=False,
        **kwargs,
    ):
        if mode != "rb":
            raise NotImplementedError("File mode not supported")
        self.asynchronous = asynchronous
        self.url = url
        self.session = session
        self.details = {"name": url, "size": size, "type": "file"}
        super().__init__(
            fs=fs,
            path=url,
            mode=mode,
            block_size=block_size,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )
        self.loop = loop

    def read(self, length=-1):
        """Read bytes from file

        Parameters
        ----------
        length: int
            Read up to this many bytes. If negative, read all content to end of
            file. If the server has not supplied the filesize, attempting to
            read only part of the data will raise a ValueError.
        """
        if (
            (length < 0 and self.loc == 0)  # explicit read all
            # but not when the size is known and fits into a block anyways
            and not (self.size is not None and self.size <= self.blocksize)
        ):
            self._fetch_all()
        if self.size is None:
            if length < 0:
                self._fetch_all()
        else:
            length = min(self.size - self.loc, length)
        return super().read(length)

    async def async_fetch_all(self):
        """Read whole file in one shot, without caching

        This is only called when position is still at zero,
        and read() is called without a byte-count.
        """
        logger.debug(f"Fetch all for {self}")
        if not isinstance(self.cache, AllBytes):
            r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs)
            async with r:
                r.raise_for_status()
                out = await r.read()
                self.cache = AllBytes(
                    size=len(out), fetcher=None, blocksize=None, data=out
                )
                self.size = len(out)

    _fetch_all = sync_wrapper(async_fetch_all)

    def _parse_content_range(self, headers):
        """Parse the Content-Range header"""
        s = headers.get("Content-Range", "")
        m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
        if not m:
            return None, None, None

        if m[1] == "*":
            start = end = None
        else:
            start, end = [int(x) for x in m[1].split("-")]
        total = None if m[2] == "*" else int(m[2])
        return start, end, total

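For reference, a few typical header values and what the parser above returns for them (derived directly from the regex and branches):

# "bytes 0-1023/4096" -> (0, 1023, 4096)    explicit range with total size
# "bytes */4096"      -> (None, None, 4096) unsatisfied range, size known
# "bytes 0-1023/*"    -> (0, 1023, None)    range known, total size unknown
# absent or malformed -> (None, None, None)
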
    async def async_fetch_range(self, start, end):
        """Download a block of data

        The expectation is that the server returns only the requested bytes,
        with HTTP code 206. If this is not the case, we first check the headers,
        and then stream the output - if the data size is bigger than we
        requested, an exception is raised.
        """
        logger.debug(f"Fetch range for {self}: {start}-{end}")
        kwargs = self.kwargs.copy()
        headers = kwargs.pop("headers", {}).copy()
        headers["Range"] = f"bytes={start}-{end - 1}"
        logger.debug(f"{self.url} : {headers['Range']}")
        r = await self.session.get(
            self.fs.encode_url(self.url), headers=headers, **kwargs
        )
        async with r:
            if r.status == 416:
                # range request outside file
                return b""
            r.raise_for_status()

            # If the server has handled the range request, it should reply
            # with status 206 (partial content). But we'll guess that a suitable
            # Content-Range header or a Content-Length no more than the
            # requested range also means we have got the desired range.
            response_is_range = (
                r.status == 206
                or self._parse_content_range(r.headers)[0] == start
                or int(r.headers.get("Content-Length", end + 1)) <= end - start
            )

            if response_is_range:
                # partial content, as expected
                out = await r.read()
            elif start > 0:
                raise ValueError(
                    "The HTTP server doesn't appear to support range requests. "
                    "Only reading this file from the beginning is supported. "
                    "Open with block_size=0 for a streaming file interface."
                )
            else:
                # Response is not a range, but we want the start of the file,
                # so we can read the required amount anyway.
                cl = 0
                out = []
                while True:
                    chunk = await r.content.read(2**20)
                    # data size unknown, let's read until we have enough
                    if chunk:
                        out.append(chunk)
                        cl += len(chunk)
                        if cl > end - start:
                            break
                    else:
                        break
                out = b"".join(out)[: end - start]
            return out

    _fetch_range = sync_wrapper(async_fetch_range)

    def __reduce__(self):
        return (
            reopen,
            (
                self.fs,
                self.url,
                self.mode,
                self.blocksize,
                self.cache.name if self.cache else "none",
                self.size,
            ),
        )

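With the class complete, a minimal usage sketch (the URL and block size are illustrative; random access only works against servers that honour Range requests):

import fsspec

with fsspec.open("https://example.org/big.bin", "rb", block_size=2**20) as f:
    f.seek(1024)
    chunk = f.read(4096)  # served by async_fetch_range as a Range request
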
def reopen(fs, url, mode, blocksize, cache_type, size=None):
    return fs.open(
        url, mode=mode, block_size=blocksize, cache_type=cache_type, size=size
    )

magic_check = re.compile("([*[])")


def has_magic(s):
    match = magic_check.search(s)
    return match is not None

class HTTPStreamFile(AbstractBufferedFile):
    def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
        self.asynchronous = kwargs.pop("asynchronous", False)
        self.url = url
        self.loop = loop
        self.session = session
        if mode != "rb":
            raise ValueError(f"Streaming HTTP files are read-only, got mode={mode!r}")
        self.details = {"name": url, "size": None}
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)

        async def cor():
            r = await self.session.get(self.fs.encode_url(url), **kwargs).__aenter__()
            self.fs._raise_not_found_for_status(r, url)
            return r

        self.r = sync(self.loop, cor)

    def seek(self, loc, whence=0):
        if loc == 0 and whence == 1:
            return
        if loc == self.loc and whence == 0:
            return
        raise ValueError("Cannot seek streaming HTTP file")

    async def _read(self, num=-1):
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    read = sync_wrapper(_read)

    async def _close(self):
        self.r.close()

    def close(self):
        asyncio.run_coroutine_threadsafe(self._close(), self.loop)
        super().close()

    def __reduce__(self):
        return reopen, (self.fs, self.url, self.mode, self.blocksize, self.cache.name)

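A sketch of the streaming path (the URL is illustrative): passing block_size=0 makes the filesystem hand back this streaming class, which reads forward-only from one open response and never issues Range requests:

import fsspec

fs = fsspec.filesystem("http")
with fs.open("https://example.org/log.txt", "rb", block_size=0) as f:
    first = f.read(1024)  # sequential read from the open response body
    rest = f.read()       # continues where the previous read stopped
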
class AsyncStreamFile(AbstractAsyncStreamedFile):
    def __init__(
        self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs
    ):
        self.url = url
        self.session = session
        self.r = None
        if mode != "rb":
            raise ValueError(f"Streaming HTTP files are read-only, got mode={mode!r}")
        self.details = {"name": url, "size": None}
        self.kwargs = kwargs
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none")
        self.size = size

    async def read(self, num=-1):
        if self.r is None:
            r = await self.session.get(
                self.fs.encode_url(self.url), **self.kwargs
            ).__aenter__()
            self.fs._raise_not_found_for_status(r, self.url)
            self.r = r
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    async def close(self):
        if self.r is not None:
            self.r.close()
            self.r = None
        await super().close()

async def get_range(session, url, start, end, file=None, **kwargs):
    # explicitly fetch a byte range when we know it must be safe
    kwargs = kwargs.copy()
    headers = kwargs.pop("headers", {}).copy()
    headers["Range"] = f"bytes={start}-{end - 1}"
    r = await session.get(url, headers=headers, **kwargs)
    r.raise_for_status()
    async with r:
        out = await r.read()
    if file:
        with open(file, "r+b") as f:  # noqa: ASYNC101
            f.seek(start)
            f.write(out)
    else:
        return out

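A short sketch of calling this helper directly with aiohttp (URL and offsets are illustrative):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # fetch bytes [0, 1024) of the resource in a single Range request
        data = await get_range(session, "https://example.org/big.bin", 0, 1024)
        print(len(data))

asyncio.run(main())
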
async def _file_info(url, session, size_policy="head", **kwargs):
    """Call HEAD on the server to get details about the file (size/checksum etc.)

    Default operation is to explicitly allow redirects and use encoding
    'identity' (no compression) to get the true size of the target.
    """
    logger.debug("Retrieve file size for %s", url)
    kwargs = kwargs.copy()
    ar = kwargs.pop("allow_redirects", True)
    head = kwargs.get("headers", {}).copy()
    head["Accept-Encoding"] = "identity"
    kwargs["headers"] = head

    info = {}
    if size_policy == "head":
        r = await session.head(url, allow_redirects=ar, **kwargs)
    elif size_policy == "get":
        r = await session.get(url, allow_redirects=ar, **kwargs)
    else:
        raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
    async with r:
        r.raise_for_status()

        # TODO:
        # recognise lack of 'Accept-Ranges',
        # or 'Accept-Ranges': 'none' (not 'bytes')
        # to mean streaming only, no random access => return None
        if "Content-Length" in r.headers:
            # Some servers may choose to ignore Accept-Encoding and return
            # compressed content, in which case the returned size is unreliable.
            if "Content-Encoding" not in r.headers or r.headers["Content-Encoding"] in [
                "identity",
                "",
            ]:
                info["size"] = int(r.headers["Content-Length"])
        elif "Content-Range" in r.headers:
            info["size"] = int(r.headers["Content-Range"].split("/")[1])

        if "Content-Type" in r.headers:
            info["mimetype"] = r.headers["Content-Type"].partition(";")[0]

        info["url"] = str(r.url)

        for checksum_field in ["ETag", "Content-MD5", "Digest"]:
            if r.headers.get(checksum_field):
                info[checksum_field] = r.headers[checksum_field]

    return info

async def _file_size(url, session=None, *args, **kwargs):
    if session is None:
        session = await get_client()
    info = await _file_info(url, session=session, *args, **kwargs)
    return info.get("size")


file_size = sync_wrapper(_file_size)
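A usage sketch for the synchronous wrapper (the URL is illustrative):

import fsspec.implementations.http as http

# file_size is the sync_wrapper form of _file_size defined above
n = http.file_size("https://example.org/big.bin")
print(n)  # None if the server reported no usable Content-Length
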
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/jupyter.py
ADDED
@@ -0,0 +1,124 @@
import base64
import io
import re

import requests

import fsspec


class JupyterFileSystem(fsspec.AbstractFileSystem):
    """View of the files as seen by a Jupyter server (notebook or lab)"""

    protocol = ("jupyter", "jlab")

    def __init__(self, url, tok=None, **kwargs):
        """

        Parameters
        ----------
        url : str
            Base URL of the server, like "http://127.0.0.1:8888". May include
            the token in the query string, as printed by the process when
            starting up
        tok : str
            If the token is obtained separately, can be given here
        kwargs
        """
        if "?" in url:
            if tok is None:
                try:
                    tok = re.findall("token=([a-z0-9]+)", url)[0]
                except IndexError as e:
                    raise ValueError("Could not determine token") from e
            url = url.split("?", 1)[0]
        self.url = url.rstrip("/") + "/api/contents"
        self.session = requests.Session()
        if tok:
            self.session.headers["Authorization"] = f"token {tok}"

        super().__init__(**kwargs)

    def ls(self, path, detail=True, **kwargs):
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()

        if out["type"] == "directory":
            out = out["content"]
        else:
            out = [out]
        for o in out:
            o["name"] = o.pop("path")
            o.pop("content")
            if o["type"] == "notebook":
                o["type"] = "file"
        if detail:
            return out
        return [o["name"] for o in out]

    def cat_file(self, path, start=None, end=None, **kwargs):
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()
        if out["format"] == "text":
            # data should be binary
            b = out["content"].encode()
        else:
            b = base64.b64decode(out["content"])
        return b[start:end]

    def pipe_file(self, path, value, **_):
        path = self._strip_protocol(path)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": len(value),
            "content": base64.b64encode(value).decode(),
            "format": "base64",
            "type": "file",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def mkdir(self, path, create_parents=True, **kwargs):
        path = self._strip_protocol(path)
        if create_parents and "/" in path:
            self.mkdir(path.rsplit("/", 1)[0], True)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": None,
            "content": None,
            "type": "directory",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def _rm(self, path):
        path = self._strip_protocol(path)
        self.session.delete(f"{self.url}/{path}")

    def _open(self, path, mode="rb", **kwargs):
        path = self._strip_protocol(path)
        if mode == "rb":
            data = self.cat_file(path)
            return io.BytesIO(data)
        else:
            return SimpleFileWriter(self, path, mode="wb")

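A minimal usage sketch (the server URL and token are illustrative; the token normally comes from the Jupyter server's startup message):

import fsspec

fs = fsspec.filesystem("jupyter", url="http://127.0.0.1:8888", tok="abc123")
fs.ls("")                               # list the server's root contents
fs.pipe_file("data/out.bin", b"hello")  # write through the contents API
print(fs.cat_file("data/out.bin"))
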
class SimpleFileWriter(fsspec.spec.AbstractBufferedFile):
    def _upload_chunk(self, final=False):
        """Never uploads a chunk until file is done

        Not suitable for large files
        """
        if final is False:
            return False
        self.buffer.seek(0)
        data = self.buffer.read()
        self.fs.pipe_file(self.path, data)
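Deferring the upload until final=True means the Jupyter contents API sees exactly one PUT per file, at close time; the trade-off, as the docstring notes, is that the whole payload is buffered in memory, so this writer is unsuitable for large files.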
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/memory.py
ADDED
@@ -0,0 +1,292 @@
from __future__ import annotations

import logging
from datetime import datetime, timezone
from errno import ENOTEMPTY
from io import BytesIO
from typing import Any, ClassVar

from fsspec import AbstractFileSystem

logger = logging.getLogger("fsspec.memoryfs")


class MemoryFileSystem(AbstractFileSystem):
    """A filesystem based on a dict of BytesIO objects

    This is a global filesystem so instances of this class all point to the same
    in-memory filesystem.
    """

    store: ClassVar[dict[str, Any]] = {}  # global, do not overwrite!
    pseudo_dirs = [""]  # global, do not overwrite!
    protocol = "memory"
    root_marker = "/"

    @classmethod
    def _strip_protocol(cls, path):
        if path.startswith("memory://"):
            path = path[len("memory://") :]
        if "::" in path or "://" in path:
            return path.rstrip("/")
        path = path.lstrip("/").rstrip("/")
        return "/" + path if path else ""

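For reference, how the normalization above treats a few inputs (values traced directly through the branches):

# "memory://a/b/" -> "/a/b"            protocol removed, slashes normalized
# "/a/b"          -> "/a/b"
# "memory://"     -> ""                the root collapses to the empty string
# "s3://bucket/x" -> "s3://bucket/x"   chained/foreign URLs pass through
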
    def ls(self, path, detail=True, **kwargs):
        path = self._strip_protocol(path)
        if path in self.store:
            # there is a key with this exact name
            if not detail:
                return [path]
            return [
                {
                    "name": path,
                    "size": self.store[path].size,
                    "type": "file",
                    "created": self.store[path].created.timestamp(),
                }
            ]
        paths = set()
        starter = path + "/"
        out = []
        for p2 in tuple(self.store):
            if p2.startswith(starter):
                if "/" not in p2[len(starter) :]:
                    # exact child
                    out.append(
                        {
                            "name": p2,
                            "size": self.store[p2].size,
                            "type": "file",
                            "created": self.store[p2].created.timestamp(),
                        }
                    )
                elif len(p2) > len(starter):
                    # implied child directory
                    ppath = starter + p2[len(starter) :].split("/", 1)[0]
                    if ppath not in paths:
                        out = out or []
                        out.append(
                            {
                                "name": ppath,
                                "size": 0,
                                "type": "directory",
                            }
                        )
                        paths.add(ppath)
        for p2 in self.pseudo_dirs:
            if p2.startswith(starter):
                if "/" not in p2[len(starter) :]:
                    # exact child pdir
                    if p2 not in paths:
                        out.append({"name": p2, "size": 0, "type": "directory"})
                        paths.add(p2)
                else:
                    # directory implied by deeper pdir
                    ppath = starter + p2[len(starter) :].split("/", 1)[0]
                    if ppath not in paths:
                        out.append({"name": ppath, "size": 0, "type": "directory"})
                        paths.add(ppath)
        if not out:
            if path in self.pseudo_dirs:
                # empty dir
                return []
            raise FileNotFoundError(path)
        if detail:
            return out
        return sorted([f["name"] for f in out])

    def mkdir(self, path, create_parents=True, **kwargs):
        path = self._strip_protocol(path)
        if path in self.store or path in self.pseudo_dirs:
            raise FileExistsError(path)
        if self._parent(path).strip("/") and self.isfile(self._parent(path)):
            raise NotADirectoryError(self._parent(path))
        if create_parents and self._parent(path).strip("/"):
            try:
                self.mkdir(self._parent(path), create_parents, **kwargs)
            except FileExistsError:
                pass
        if path and path not in self.pseudo_dirs:
            self.pseudo_dirs.append(path)

    def makedirs(self, path, exist_ok=False):
        try:
            self.mkdir(path, create_parents=True)
        except FileExistsError:
            if not exist_ok:
                raise

    def pipe_file(self, path, value, **kwargs):
        """Set the bytes of given file

        Avoids copies of the data if possible
        """
        self.open(path, "wb", data=value)

    def rmdir(self, path):
        path = self._strip_protocol(path)
        if path == "":
            # silently avoid deleting FS root
            return
        if path in self.pseudo_dirs:
            if not self.ls(path):
                self.pseudo_dirs.remove(path)
            else:
                raise OSError(ENOTEMPTY, "Directory not empty", path)
        else:
            raise FileNotFoundError(path)

    def info(self, path, **kwargs):
        path = self._strip_protocol(path)
        if path in self.pseudo_dirs or any(
            p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
        ):
            return {
                "name": path,
                "size": 0,
                "type": "directory",
            }
        elif path in self.store:
            filelike = self.store[path]
            return {
                "name": path,
                "size": filelike.size,
                "type": "file",
                "created": getattr(filelike, "created", None),
            }
        else:
            raise FileNotFoundError(path)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if path in self.pseudo_dirs:
            raise IsADirectoryError(path)
        parent = path
        while len(parent) > 1:
            parent = self._parent(parent)
            if self.isfile(parent):
                raise FileExistsError(parent)
        if mode in ["rb", "ab", "r+b"]:
            if path in self.store:
                f = self.store[path]
                if mode == "ab":
                    # position at the end of file
                    f.seek(0, 2)
                else:
                    # position at the beginning of file
                    f.seek(0)
                return f
            else:
                raise FileNotFoundError(path)
        elif mode == "wb":
            m = MemoryFile(self, path, kwargs.get("data"))
            if not self._intrans:
                m.commit()
            return m
        else:
            name = self.__class__.__name__
            raise ValueError(f"unsupported file mode for {name}: {mode!r}")

    def cp_file(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1)
        path2 = self._strip_protocol(path2)
        if self.isfile(path1):
            self.store[path2] = MemoryFile(
                self, path2, self.store[path1].getvalue()
            )  # implicit copy
        elif self.isdir(path1):
            if path2 not in self.pseudo_dirs:
                self.pseudo_dirs.append(path2)
        else:
            raise FileNotFoundError(path1)

    def cat_file(self, path, start=None, end=None, **kwargs):
        path = self._strip_protocol(path)
        try:
            return bytes(self.store[path].getbuffer()[start:end])
        except KeyError:
            raise FileNotFoundError(path)

    def _rm(self, path):
        path = self._strip_protocol(path)
        try:
            del self.store[path]
        except KeyError as e:
            raise FileNotFoundError(path) from e

    def modified(self, path):
        path = self._strip_protocol(path)
        try:
            return self.store[path].modified
        except KeyError:
            raise FileNotFoundError(path)

    def created(self, path):
        path = self._strip_protocol(path)
        try:
            return self.store[path].created
        except KeyError:
            raise FileNotFoundError(path)

    def rm(self, path, recursive=False, maxdepth=None):
        if isinstance(path, str):
            path = self._strip_protocol(path)
        else:
            path = [self._strip_protocol(p) for p in path]
        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        for p in reversed(paths):
            # If the expanded path doesn't exist, it is only because the expanded
            # path was a directory that does not exist in self.pseudo_dirs. This
            # is possible if you directly create files without making the
            # directories first.
            if not self.exists(p):
                continue
            if self.isfile(p):
                self.rm_file(p)
            else:
                self.rmdir(p)

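A usage sketch: because store and pseudo_dirs are class-level, every instance of this filesystem shares the same state within the process.

import fsspec

fs = fsspec.filesystem("memory")
fs.pipe_file("/demo/a.txt", b"hello")  # stores a MemoryFile under this key
print(fs.cat_file("/demo/a.txt"))      # b"hello"
print(fs.ls("/demo", detail=False))    # ["/demo/a.txt"]
fs.rm("/demo", recursive=True)         # removes files, then the pseudo-dir
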
class MemoryFile(BytesIO):
    """A BytesIO which can't close and works as a context manager

    Can initialise with data. Each path should only be active once at any moment.

    No need to provide fs, path if auto-committing (default)
    """

    def __init__(self, fs=None, path=None, data=None):
        logger.debug("open file %s", path)
        self.fs = fs
        self.path = path
        self.created = datetime.now(tz=timezone.utc)
        self.modified = datetime.now(tz=timezone.utc)
        if data:
            super().__init__(data)
            self.seek(0)

    @property
    def size(self):
        return self.getbuffer().nbytes

    def __enter__(self):
        return self

    def close(self):
        pass

    def discard(self):
        pass

    def commit(self):
        self.fs.store[self.path] = self
        self.modified = datetime.now(tz=timezone.utc)
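Finally, a sketch of how commit() interacts with transactions (the exact deferral semantics come from fsspec's generic Transaction machinery, not this file): _open("wb") only calls commit() immediately when the filesystem is not in a transaction.

import fsspec

fs = fsspec.filesystem("memory")
# Inside a transaction, newly written files are committed on completion
# rather than at write time.
with fs.transaction:
    with fs.open("/txn/file.bin", "wb") as f:
        f.write(b"staged")
print(fs.cat_file("/txn/file.bin"))  # visible once the transaction completes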