applied-ai-018 committed
Commit a8229b8 · verified · 1 parent: cca03c8

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. llmeval-env/lib/python3.10/site-packages/fsspec/__init__.py +70 -0
  2. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/fsspec/_version.py +21 -0
  24. llmeval-env/lib/python3.10/site-packages/fsspec/archive.py +73 -0
  25. llmeval-env/lib/python3.10/site-packages/fsspec/asyn.py +1096 -0
  26. llmeval-env/lib/python3.10/site-packages/fsspec/callbacks.py +324 -0
  27. llmeval-env/lib/python3.10/site-packages/fsspec/compression.py +174 -0
  28. llmeval-env/lib/python3.10/site-packages/fsspec/config.py +131 -0
  29. llmeval-env/lib/python3.10/site-packages/fsspec/conftest.py +55 -0
  30. llmeval-env/lib/python3.10/site-packages/fsspec/core.py +714 -0
  31. llmeval-env/lib/python3.10/site-packages/fsspec/exceptions.py +17 -0
  32. llmeval-env/lib/python3.10/site-packages/fsspec/fuse.py +324 -0
  33. llmeval-env/lib/python3.10/site-packages/fsspec/gui.py +414 -0
  34. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__init__.py +0 -0
  35. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/arrow.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/github.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,70 @@
+from importlib.metadata import entry_points
+
+from . import _version, caching
+from .callbacks import Callback
+from .compression import available_compressions
+from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
+from .exceptions import FSTimeoutError
+from .mapping import FSMap, get_mapper
+from .registry import (
+    available_protocols,
+    filesystem,
+    get_filesystem_class,
+    register_implementation,
+    registry,
+)
+from .spec import AbstractFileSystem
+
+__version__ = _version.get_versions()["version"]
+
+__all__ = [
+    "AbstractFileSystem",
+    "FSTimeoutError",
+    "FSMap",
+    "filesystem",
+    "register_implementation",
+    "get_filesystem_class",
+    "get_fs_token_paths",
+    "get_mapper",
+    "open",
+    "open_files",
+    "open_local",
+    "registry",
+    "caching",
+    "Callback",
+    "available_protocols",
+    "available_compressions",
+    "url_to_fs",
+]
+
+
+def process_entries():
+    if entry_points is not None:
+        try:
+            eps = entry_points()
+        except TypeError:
+            pass  # importlib-metadata < 0.8
+        else:
+            if hasattr(eps, "select"):  # Python 3.10+ / importlib_metadata >= 3.9.0
+                specs = eps.select(group="fsspec.specs")
+            else:
+                specs = eps.get("fsspec.specs", [])
+            registered_names = {}
+            for spec in specs:
+                err_msg = f"Unable to load filesystem from {spec}"
+                name = spec.name
+                if name in registered_names:
+                    continue
+                registered_names[name] = True
+                register_implementation(
+                    name,
+                    spec.value.replace(":", "."),
+                    errtxt=err_msg,
+                    # We take our implementations as the ones to overload with if
+                    # for some reason we encounter some, may be the same, already
+                    # registered
+                    clobber=True,
+                )
+
+
+process_entries()
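
For orientation: the module above wires up fsspec's public entry points (filesystem, open, get_mapper, the implementation registry) and auto-registers third-party backends published under the fsspec.specs entry-point group. A minimal sketch of how that top-level API is typically used, against the built-in "memory" backend (the path names here are purely illustrative):

import fsspec

# The registry maps protocol names to implementations; "memory" ships with fsspec.
fs = fsspec.filesystem("memory")
with fs.open("/demo/hello.txt", "wb") as f:
    f.write(b"hello")

# fsspec.open resolves "protocol://path" URLs through the same registry.
with fsspec.open("memory://demo/hello.txt", "rb") as f:
    print(f.read())  # b"hello"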
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.48 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc ADDED
Binary file (506 Bytes).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc ADDED
Binary file (2.99 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc ADDED
Binary file (29.3 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc ADDED
Binary file (22 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc ADDED
Binary file (10.9 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc ADDED
Binary file (5.1 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc ADDED
Binary file (3.83 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (1.57 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc ADDED
Binary file (21.3 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc ADDED
Binary file (3.42 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (743 Bytes).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc ADDED
Binary file (10.1 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc ADDED
Binary file (12.6 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc ADDED
Binary file (14.6 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (8.96 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc ADDED
Binary file (11.2 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc ADDED
Binary file (8.62 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc ADDED
Binary file (58.5 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc ADDED
Binary file (3.21 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc ADDED
Binary file (19.9 kB).
llmeval-env/lib/python3.10/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.29) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+
+version_json = '''
+{
+ "date": "2024-03-18T15:33:58-0400",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "47b445ae4c284a82dd15e0287b1ffc410e8fc470",
+ "version": "2024.3.1"
+}
+'''  # END VERSION_JSON
+
+
+def get_versions():
+    return json.loads(version_json)
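
The JSON blob above is what get_versions() returns, so this vendored snapshot reports its version as:

import fsspec

# __version__ is populated in __init__.py from _version.get_versions()
print(fsspec.__version__)  # "2024.3.1"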
llmeval-env/lib/python3.10/site-packages/fsspec/archive.py ADDED
@@ -0,0 +1,73 @@
+from fsspec import AbstractFileSystem
+from fsspec.utils import tokenize
+
+
+class AbstractArchiveFileSystem(AbstractFileSystem):
+    """
+    A generic superclass for implementing Archive-based filesystems.
+
+    Currently, it is shared amongst
+    :class:`~fsspec.implementations.zip.ZipFileSystem`,
+    :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
+    :class:`~fsspec.implementations.tar.TarFileSystem`.
+    """
+
+    def __str__(self):
+        return f"<Archive-like object {type(self).__name__} at {id(self)}>"
+
+    __repr__ = __str__
+
+    def ukey(self, path):
+        return tokenize(path, self.fo, self.protocol)
+
+    def _all_dirnames(self, paths):
+        """Returns *all* directory names for each path in paths, including intermediate
+        ones.
+
+        Parameters
+        ----------
+        paths: Iterable of path strings
+        """
+        if len(paths) == 0:
+            return set()
+
+        dirnames = {self._parent(path) for path in paths} - {self.root_marker}
+        return dirnames | self._all_dirnames(dirnames)
+
+    def info(self, path, **kwargs):
+        self._get_dirs()
+        path = self._strip_protocol(path)
+        if path in {"", "/"} and self.dir_cache:
+            return {"name": "", "type": "directory", "size": 0}
+        if path in self.dir_cache:
+            return self.dir_cache[path]
+        elif path + "/" in self.dir_cache:
+            return self.dir_cache[path + "/"]
+        else:
+            raise FileNotFoundError(path)
+
+    def ls(self, path, detail=True, **kwargs):
+        self._get_dirs()
+        paths = {}
+        for p, f in self.dir_cache.items():
+            p = p.rstrip("/")
+            if "/" in p:
+                root = p.rsplit("/", 1)[0]
+            else:
+                root = ""
+            if root == path.rstrip("/"):
+                paths[p] = f
+            elif all(
+                (a == b)
+                for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
+            ):
+                # root directory entry
+                ppath = p.rstrip("/").split("/", 1)[0]
+                if ppath not in paths:
+                    out = {"name": ppath, "size": 0, "type": "directory"}
+                    paths[ppath] = out
+        if detail:
+            out = sorted(paths.values(), key=lambda _: _["name"])
+            return out
+        else:
+            return sorted(paths)
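
AbstractArchiveFileSystem supplies the shared ls/info logic for the zip, tar and libarchive backends. A small self-contained sketch using the zip implementation that inherits it (the archive contents below are made up for the example):

import io
import zipfile

import fsspec

# Build an in-memory archive so nothing touches disk.
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("data/a.csv", "x,y\n1,2\n")

fs = fsspec.filesystem("zip", fo=buf)  # ZipFileSystem subclasses AbstractArchiveFileSystem
print(fs.ls("/", detail=False))        # top-level entries, e.g. the "data" directory
print(fs.info("data/a.csv")["size"])   # served from the populated dir_cache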
llmeval-env/lib/python3.10/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1096 @@
+import asyncio
+import asyncio.events
+import functools
+import inspect
+import io
+import numbers
+import os
+import re
+import threading
+from contextlib import contextmanager
+from glob import has_magic
+from typing import TYPE_CHECKING, Iterable
+
+from .callbacks import DEFAULT_CALLBACK
+from .exceptions import FSTimeoutError
+from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
+from .spec import AbstractBufferedFile, AbstractFileSystem
+from .utils import glob_translate, is_exception, other_paths
+
+private = re.compile("_[^_]")
+iothread = [None]  # dedicated fsspec IO thread
+loop = [None]  # global event loop for any non-async instance
+_lock = None  # global lock placeholder
+get_running_loop = asyncio.get_running_loop
+
+
+def get_lock():
+    """Allocate or return a threading lock.
+
+    The lock is allocated on first use to allow setting one lock per forked process.
+    """
+    global _lock
+    if not _lock:
+        _lock = threading.Lock()
+    return _lock
+
+
+def reset_lock():
+    """Reset the global lock.
+
+    This should be called only on the init of a forked process to reset the lock to
+    None, enabling the new forked process to get a new lock.
+    """
+    global _lock
+
+    iothread[0] = None
+    loop[0] = None
+    _lock = None
+
+
+async def _runner(event, coro, result, timeout=None):
+    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
+    if timeout is not None:
+        coro = asyncio.wait_for(coro, timeout=timeout)
+    try:
+        result[0] = await coro
+    except Exception as ex:
+        result[0] = ex
+    finally:
+        event.set()
+
+
+def sync(loop, func, *args, timeout=None, **kwargs):
+    """
+    Make loop run coroutine until it returns. Runs in other thread
+
+    Examples
+    --------
+    >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
+                         timeout=timeout, **kwargs)
+    """
+    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
+    # NB: if the loop is not running *yet*, it is OK to submit work
+    # and we will wait for it
+    if loop is None or loop.is_closed():
+        raise RuntimeError("Loop is not running")
+    try:
+        loop0 = asyncio.events.get_running_loop()
+        if loop0 is loop:
+            raise NotImplementedError("Calling sync() from within a running loop")
+    except NotImplementedError:
+        raise
+    except RuntimeError:
+        pass
+    coro = func(*args, **kwargs)
+    result = [None]
+    event = threading.Event()
+    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
+    while True:
+        # this loops allows thread to get interrupted
+        if event.wait(1):
+            break
+        if timeout is not None:
+            timeout -= 1
+            if timeout < 0:
+                raise FSTimeoutError
+
+    return_result = result[0]
+    if isinstance(return_result, asyncio.TimeoutError):
+        # suppress asyncio.TimeoutError, raise FSTimeoutError
+        raise FSTimeoutError from return_result
+    elif isinstance(return_result, BaseException):
+        raise return_result
+    else:
+        return return_result
+
+
+def sync_wrapper(func, obj=None):
+    """Given a function, make so can be called in blocking contexts
+
+    Leave obj=None if defining within a class. Pass the instance if attaching
+    as an attribute of the instance.
+    """
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        self = obj or args[0]
+        return sync(self.loop, func, *args, **kwargs)
+
+    return wrapper
+
+
+@contextmanager
+def _selector_policy():
+    original_policy = asyncio.get_event_loop_policy()
+    try:
+        if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+        yield
+    finally:
+        asyncio.set_event_loop_policy(original_policy)
+
+
+def get_loop():
+    """Create or return the default fsspec IO loop
+
+    The loop will be running on a separate thread.
+    """
+    if loop[0] is None:
+        with get_lock():
+            # repeat the check just in case the loop got filled between the
+            # previous two calls from another thread
+            if loop[0] is None:
+                with _selector_policy():
+                    loop[0] = asyncio.new_event_loop()
+                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
+                th.daemon = True
+                th.start()
+                iothread[0] = th
+    return loop[0]
+
+
+if TYPE_CHECKING:
+    import resource
+
+    ResourceError = resource.error
+else:
+    try:
+        import resource
+    except ImportError:
+        resource = None
+        ResourceError = OSError
+    else:
+        ResourceError = getattr(resource, "error", OSError)
+
+_DEFAULT_BATCH_SIZE = 128
+_NOFILES_DEFAULT_BATCH_SIZE = 1280
+
+
+def _get_batch_size(nofiles=False):
+    from fsspec.config import conf
+
+    if nofiles:
+        if "nofiles_gather_batch_size" in conf:
+            return conf["nofiles_gather_batch_size"]
+    else:
+        if "gather_batch_size" in conf:
+            return conf["gather_batch_size"]
+    if nofiles:
+        return _NOFILES_DEFAULT_BATCH_SIZE
+    if resource is None:
+        return _DEFAULT_BATCH_SIZE
+
+    try:
+        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+    except (ImportError, ValueError, ResourceError):
+        return _DEFAULT_BATCH_SIZE
+
+    if soft_limit == resource.RLIM_INFINITY:
+        return -1
+    else:
+        return soft_limit // 8
+
+
+def running_async() -> bool:
+    """Being executed by an event loop?"""
+    try:
+        asyncio.get_running_loop()
+        return True
+    except RuntimeError:
+        return False
+
+
+async def _run_coros_in_chunks(
+    coros,
+    batch_size=None,
+    callback=DEFAULT_CALLBACK,
+    timeout=None,
+    return_exceptions=False,
+    nofiles=False,
+):
+    """Run the given coroutines in chunks.
+
+    Parameters
+    ----------
+    coros: list of coroutines to run
+    batch_size: int or None
+        Number of coroutines to submit/wait on simultaneously.
+        If -1, then it will not be any throttling. If
+        None, it will be inferred from _get_batch_size()
+    callback: fsspec.callbacks.Callback instance
+        Gets a relative_update when each coroutine completes
+    timeout: number or None
+        If given, each coroutine times out after this time. Note that, since
+        there are multiple batches, the total run time of this function will in
+        general be longer
+    return_exceptions: bool
+        Same meaning as in asyncio.gather
+    nofiles: bool
+        If inferring the batch_size, does this operation involve local files?
+        If yes, you normally expect smaller batches.
+    """
+
+    if batch_size is None:
+        batch_size = _get_batch_size(nofiles=nofiles)
+
+    if batch_size == -1:
+        batch_size = len(coros)
+
+    assert batch_size > 0
+
+    async def _run_coro(coro, i):
+        try:
+            return await asyncio.wait_for(coro, timeout=timeout), i
+        except Exception as e:
+            if not return_exceptions:
+                raise
+            return e, i
+        finally:
+            callback.relative_update(1)
+
+    i = 0
+    n = len(coros)
+    results = [None] * n
+    pending = set()
+
+    while pending or i < n:
+        while len(pending) < batch_size and i < n:
+            pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
+            i += 1
+
+        if not pending:
+            break
+
+        done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
+        while done:
+            result, k = await done.pop()
+            results[k] = result
+
+    return results
+
+
+# these methods should be implemented as async by any async-able backend
+async_methods = [
+    "_ls",
+    "_cat_file",
+    "_get_file",
+    "_put_file",
+    "_rm_file",
+    "_cp_file",
+    "_pipe_file",
+    "_expand_path",
+    "_info",
+    "_isfile",
+    "_isdir",
+    "_exists",
+    "_walk",
+    "_glob",
+    "_find",
+    "_du",
+    "_size",
+    "_mkdir",
+    "_makedirs",
+]
+
+
+class AsyncFileSystem(AbstractFileSystem):
+    """Async file operations, default implementations
+
+    Passes bulk operations to asyncio.gather for concurrent operation.
+
+    Implementations that have concurrent batch operations and/or async methods
+    should inherit from this class instead of AbstractFileSystem. Docstrings are
+    copied from the un-underscored method in AbstractFileSystem, if not given.
+    """
+
+    # note that methods do not have docstring here; they will be copied
+    # for _* methods and inferred for overridden methods.
+
+    async_impl = True
+    mirror_sync_methods = True
+    disable_throttling = False
+
+    def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
+        self.asynchronous = asynchronous
+        self._pid = os.getpid()
+        if not asynchronous:
+            self._loop = loop or get_loop()
+        else:
+            self._loop = None
+        self.batch_size = batch_size
+        super().__init__(*args, **kwargs)
+
+    @property
+    def loop(self):
+        if self._pid != os.getpid():
+            raise RuntimeError("This class is not fork-safe")
+        return self._loop
+
+    async def _rm_file(self, path, **kwargs):
+        raise NotImplementedError
+
+    async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
+        # TODO: implement on_error
+        batch_size = batch_size or self.batch_size
+        path = await self._expand_path(path, recursive=recursive)
+        return await _run_coros_in_chunks(
+            [self._rm_file(p, **kwargs) for p in reversed(path)],
+            batch_size=batch_size,
+            nofiles=True,
+        )
+
+    async def _cp_file(self, path1, path2, **kwargs):
+        raise NotImplementedError
+
+    async def _copy(
+        self,
+        path1,
+        path2,
+        recursive=False,
+        on_error=None,
+        maxdepth=None,
+        batch_size=None,
+        **kwargs,
+    ):
+        if on_error is None and recursive:
+            on_error = "ignore"
+        elif on_error is None:
+            on_error = "raise"
+
+        if isinstance(path1, list) and isinstance(path2, list):
+            # No need to expand paths when both source and destination
+            # are provided as lists
+            paths1 = path1
+            paths2 = path2
+        else:
+            source_is_str = isinstance(path1, str)
+            paths1 = await self._expand_path(
+                path1, maxdepth=maxdepth, recursive=recursive
+            )
+            if source_is_str and (not recursive or maxdepth is not None):
+                # Non-recursive glob does not copy directories
+                paths1 = [
+                    p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
+                ]
+                if not paths1:
+                    return
+
+            source_is_file = len(paths1) == 1
+            dest_is_dir = isinstance(path2, str) and (
+                trailing_sep(path2) or await self._isdir(path2)
+            )
+
+            exists = source_is_str and (
+                (has_magic(path1) and source_is_file)
+                or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
+            )
+            paths2 = other_paths(
+                paths1,
+                path2,
+                exists=exists,
+                flatten=not source_is_str,
+            )
+
+        batch_size = batch_size or self.batch_size
+        coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
+        result = await _run_coros_in_chunks(
+            coros, batch_size=batch_size, return_exceptions=True, nofiles=True
+        )
+
+        for ex in filter(is_exception, result):
+            if on_error == "ignore" and isinstance(ex, FileNotFoundError):
+                continue
+            raise ex
+
+    async def _pipe_file(self, path, value, **kwargs):
+        raise NotImplementedError
+
+    async def _pipe(self, path, value=None, batch_size=None, **kwargs):
+        if isinstance(path, str):
+            path = {path: value}
+        batch_size = batch_size or self.batch_size
+        return await _run_coros_in_chunks(
+            [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
+            batch_size=batch_size,
+            nofiles=True,
+        )
+
+    async def _process_limits(self, url, start, end):
+        """Helper for "Range"-based _cat_file"""
+        size = None
+        suff = False
+        if start is not None and start < 0:
+            # if start is negative and end None, end is the "suffix length"
+            if end is None:
+                end = -start
+                start = ""
+                suff = True
+            else:
+                size = size or (await self._info(url))["size"]
+                start = size + start
+        elif start is None:
+            start = 0
+        if not suff:
+            if end is not None and end < 0:
+                if start is not None:
+                    size = size or (await self._info(url))["size"]
+                    end = size + end
+            elif end is None:
+                end = ""
+            if isinstance(end, numbers.Integral):
+                end -= 1  # bytes range is inclusive
+        return f"bytes={start}-{end}"
+
+    async def _cat_file(self, path, start=None, end=None, **kwargs):
+        raise NotImplementedError
+
+    async def _cat(
+        self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
+    ):
+        paths = await self._expand_path(path, recursive=recursive)
+        coros = [self._cat_file(path, **kwargs) for path in paths]
+        batch_size = batch_size or self.batch_size
+        out = await _run_coros_in_chunks(
+            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
+        )
+        if on_error == "raise":
+            ex = next(filter(is_exception, out), False)
+            if ex:
+                raise ex
+        if (
+            len(paths) > 1
+            or isinstance(path, list)
+            or paths[0] != self._strip_protocol(path)
+        ):
+            return {
+                k: v
+                for k, v in zip(paths, out)
+                if on_error != "omit" or not is_exception(v)
+            }
+        else:
+            return out[0]
+
+    async def _cat_ranges(
+        self,
+        paths,
+        starts,
+        ends,
+        max_gap=None,
+        batch_size=None,
+        on_error="return",
+        **kwargs,
+    ):
+        """Get the contents of byte ranges from one or more files
+
+        Parameters
+        ----------
+        paths: list
+            A list of of filepaths on this filesystems
+        starts, ends: int or list
+            Bytes limits of the read. If using a single int, the same value will be
+            used to read all the specified files.
+        """
+        # TODO: on_error
+        if max_gap is not None:
+            # use utils.merge_offset_ranges
+            raise NotImplementedError
+        if not isinstance(paths, list):
+            raise TypeError
+        if not isinstance(starts, Iterable):
+            starts = [starts] * len(paths)
+        if not isinstance(ends, Iterable):
+            ends = [ends] * len(paths)
+        if len(starts) != len(paths) or len(ends) != len(paths):
+            raise ValueError
+        coros = [
+            self._cat_file(p, start=s, end=e, **kwargs)
+            for p, s, e in zip(paths, starts, ends)
+        ]
+        batch_size = batch_size or self.batch_size
+        return await _run_coros_in_chunks(
+            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
+        )
+
+    async def _put_file(self, lpath, rpath, **kwargs):
+        raise NotImplementedError
+
+    async def _put(
+        self,
+        lpath,
+        rpath,
+        recursive=False,
+        callback=DEFAULT_CALLBACK,
+        batch_size=None,
+        maxdepth=None,
+        **kwargs,
+    ):
+        """Copy file(s) from local.
+
+        Copies a specific file or tree of files (if recursive=True). If rpath
+        ends with a "/", it will be assumed to be a directory, and target files
+        will go within.
+
+        The put_file method will be called concurrently on a batch of files. The
+        batch_size option can configure the amount of futures that can be executed
+        at the same time. If it is -1, then all the files will be uploaded concurrently.
+        The default can be set for this instance by passing "batch_size" in the
+        constructor, or for all instances by setting the "gather_batch_size" key
+        in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
+        """
+        if isinstance(lpath, list) and isinstance(rpath, list):
+            # No need to expand paths when both source and destination
+            # are provided as lists
+            rpaths = rpath
+            lpaths = lpath
+        else:
+            source_is_str = isinstance(lpath, str)
+            if source_is_str:
+                lpath = make_path_posix(lpath)
+            fs = LocalFileSystem()
+            lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
+            if source_is_str and (not recursive or maxdepth is not None):
+                # Non-recursive glob does not copy directories
+                lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
+                if not lpaths:
+                    return
+
+            source_is_file = len(lpaths) == 1
+            dest_is_dir = isinstance(rpath, str) and (
+                trailing_sep(rpath) or await self._isdir(rpath)
+            )
+
+            rpath = self._strip_protocol(rpath)
+            exists = source_is_str and (
+                (has_magic(lpath) and source_is_file)
+                or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
+            )
+            rpaths = other_paths(
+                lpaths,
+                rpath,
+                exists=exists,
+                flatten=not source_is_str,
+            )
+
+        is_dir = {l: os.path.isdir(l) for l in lpaths}
+        rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
+        file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
+
+        await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
+        batch_size = batch_size or self.batch_size
+
+        coros = []
+        callback.set_size(len(file_pairs))
+        for lfile, rfile in file_pairs:
+            put_file = callback.branch_coro(self._put_file)
+            coros.append(put_file(lfile, rfile, **kwargs))
+
+        return await _run_coros_in_chunks(
+            coros, batch_size=batch_size, callback=callback
+        )
+
+    async def _get_file(self, rpath, lpath, **kwargs):
+        raise NotImplementedError
+
+    async def _get(
+        self,
+        rpath,
+        lpath,
+        recursive=False,
+        callback=DEFAULT_CALLBACK,
+        maxdepth=None,
+        **kwargs,
+    ):
+        """Copy file(s) to local.
+
+        Copies a specific file or tree of files (if recursive=True). If lpath
+        ends with a "/", it will be assumed to be a directory, and target files
+        will go within. Can submit a list of paths, which may be glob-patterns
+        and will be expanded.
+
+        The get_file method will be called concurrently on a batch of files. The
+        batch_size option can configure the amount of futures that can be executed
+        at the same time. If it is -1, then all the files will be uploaded concurrently.
+        The default can be set for this instance by passing "batch_size" in the
+        constructor, or for all instances by setting the "gather_batch_size" key
+        in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
+        """
+        if isinstance(lpath, list) and isinstance(rpath, list):
+            # No need to expand paths when both source and destination
+            # are provided as lists
+            rpaths = rpath
+            lpaths = lpath
+        else:
+            source_is_str = isinstance(rpath, str)
+            # First check for rpath trailing slash as _strip_protocol removes it.
+            source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
+            rpath = self._strip_protocol(rpath)
+            rpaths = await self._expand_path(
+                rpath, recursive=recursive, maxdepth=maxdepth
+            )
+            if source_is_str and (not recursive or maxdepth is not None):
+                # Non-recursive glob does not copy directories
+                rpaths = [
+                    p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
+                ]
+                if not rpaths:
+                    return
+
+            lpath = make_path_posix(lpath)
+            source_is_file = len(rpaths) == 1
+            dest_is_dir = isinstance(lpath, str) and (
+                trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
+            )
+
+            exists = source_is_str and (
+                (has_magic(rpath) and source_is_file)
+                or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
+            )
+            lpaths = other_paths(
+                rpaths,
+                lpath,
+                exists=exists,
+                flatten=not source_is_str,
+            )
+
+        [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
+        batch_size = kwargs.pop("batch_size", self.batch_size)
+
+        coros = []
+        callback.set_size(len(lpaths))
+        for lpath, rpath in zip(lpaths, rpaths):
+            get_file = callback.branch_coro(self._get_file)
+            coros.append(get_file(rpath, lpath, **kwargs))
+        return await _run_coros_in_chunks(
+            coros, batch_size=batch_size, callback=callback
+        )
+
+    async def _isfile(self, path):
+        try:
+            return (await self._info(path))["type"] == "file"
+        except:  # noqa: E722
+            return False
+
+    async def _isdir(self, path):
+        try:
+            return (await self._info(path))["type"] == "directory"
+        except OSError:
+            return False
+
+    async def _size(self, path):
+        return (await self._info(path)).get("size", None)
+
+    async def _sizes(self, paths, batch_size=None):
+        batch_size = batch_size or self.batch_size
+        return await _run_coros_in_chunks(
+            [self._size(p) for p in paths], batch_size=batch_size
+        )
+
+    async def _exists(self, path, **kwargs):
+        try:
+            await self._info(path, **kwargs)
+            return True
+        except FileNotFoundError:
+            return False
+
+    async def _info(self, path, **kwargs):
+        raise NotImplementedError
+
+    async def _ls(self, path, detail=True, **kwargs):
+        raise NotImplementedError
+
+    async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
+        if maxdepth is not None and maxdepth < 1:
+            raise ValueError("maxdepth must be at least 1")
+
+        path = self._strip_protocol(path)
+        full_dirs = {}
+        dirs = {}
+        files = {}
+
+        detail = kwargs.pop("detail", False)
+        try:
+            listing = await self._ls(path, detail=True, **kwargs)
+        except (FileNotFoundError, OSError) as e:
+            if on_error == "raise":
+                raise
+            elif callable(on_error):
+                on_error(e)
+            if detail:
+                yield path, {}, {}
+            else:
+                yield path, [], []
+            return
+
+        for info in listing:
+            # each info name must be at least [path]/part , but here
+            # we check also for names like [path]/part/
+            pathname = info["name"].rstrip("/")
+            name = pathname.rsplit("/", 1)[-1]
+            if info["type"] == "directory" and pathname != path:
+                # do not include "self" path
+                full_dirs[name] = pathname
+                dirs[name] = info
+            elif pathname == path:
+                # file-like with same name as give path
+                files[""] = info
+            else:
+                files[name] = info
+
+        if detail:
+            yield path, dirs, files
+        else:
+            yield path, list(dirs), list(files)
+
+        if maxdepth is not None:
+            maxdepth -= 1
+            if maxdepth < 1:
+                return
+
+        for d in dirs:
+            async for _ in self._walk(
+                full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
+            ):
+                yield _
+
+    async def _glob(self, path, maxdepth=None, **kwargs):
+        if maxdepth is not None and maxdepth < 1:
+            raise ValueError("maxdepth must be at least 1")
+
+        import re
+
+        seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
+        ends_with_sep = path.endswith(seps)  # _strip_protocol strips trailing slash
+        path = self._strip_protocol(path)
+        append_slash_to_dirname = ends_with_sep or path.endswith(
+            tuple(sep + "**" for sep in seps)
+        )
+        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
+        idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
+        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
+
+        min_idx = min(idx_star, idx_qmark, idx_brace)
+
+        detail = kwargs.pop("detail", False)
+
+        if not has_magic(path):
+            if await self._exists(path, **kwargs):
+                if not detail:
+                    return [path]
+                else:
+                    return {path: await self._info(path, **kwargs)}
+            else:
+                if not detail:
+                    return []  # glob of non-existent returns empty
+                else:
+                    return {}
+        elif "/" in path[:min_idx]:
+            min_idx = path[:min_idx].rindex("/")
+            root = path[: min_idx + 1]
+            depth = path[min_idx + 1 :].count("/") + 1
+        else:
+            root = ""
+            depth = path[min_idx + 1 :].count("/") + 1
+
+        if "**" in path:
+            if maxdepth is not None:
+                idx_double_stars = path.find("**")
+                depth_double_stars = path[idx_double_stars:].count("/") + 1
+                depth = depth - depth_double_stars + maxdepth
+            else:
+                depth = None
+
+        allpaths = await self._find(
+            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
+        )
+
+        pattern = glob_translate(path + ("/" if ends_with_sep else ""))
+        pattern = re.compile(pattern)
+
+        out = {
+            p: info
+            for p, info in sorted(allpaths.items())
+            if pattern.match(
+                (
+                    p + "/"
+                    if append_slash_to_dirname and info["type"] == "directory"
+                    else p
+                )
+            )
+        }
+
+        if detail:
+            return out
+        else:
+            return list(out)
+
+    async def _du(self, path, total=True, maxdepth=None, **kwargs):
+        sizes = {}
+        # async for?
+        for f in await self._find(path, maxdepth=maxdepth, **kwargs):
+            info = await self._info(f)
+            sizes[info["name"]] = info["size"]
+        if total:
+            return sum(sizes.values())
+        else:
+            return sizes
+
+    async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
+        path = self._strip_protocol(path)
+        out = {}
+        detail = kwargs.pop("detail", False)
+
+        # Add the root directory if withdirs is requested
+        # This is needed for posix glob compliance
+        if withdirs and path != "" and await self._isdir(path):
+            out[path] = await self._info(path)
+
+        # async for?
+        async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
+            if withdirs:
+                files.update(dirs)
+            out.update({info["name"]: info for name, info in files.items()})
+        if not out and (await self._isfile(path)):
+            # walk works on directories, but find should also return [path]
+            # when path happens to be a file
+            out[path] = {}
+        names = sorted(out)
+        if not detail:
+            return names
+        else:
+            return {name: out[name] for name in names}
+
+    async def _expand_path(self, path, recursive=False, maxdepth=None):
+        if maxdepth is not None and maxdepth < 1:
+            raise ValueError("maxdepth must be at least 1")
+
+        if isinstance(path, str):
+            out = await self._expand_path([path], recursive, maxdepth)
+        else:
+            out = set()
+            path = [self._strip_protocol(p) for p in path]
+            for p in path:  # can gather here
+                if has_magic(p):
+                    bit = set(await self._glob(p, maxdepth=maxdepth))
+                    out |= bit
+                    if recursive:
+                        # glob call above expanded one depth so if maxdepth is defined
+                        # then decrement it in expand_path call below. If it is zero
+                        # after decrementing then avoid expand_path call.
+                        if maxdepth is not None and maxdepth <= 1:
+                            continue
+                        out |= set(
+                            await self._expand_path(
+                                list(bit),
+                                recursive=recursive,
+                                maxdepth=maxdepth - 1 if maxdepth is not None else None,
+                            )
+                        )
+                    continue
+                elif recursive:
+                    rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
+                    out |= rec
+                if p not in out and (recursive is False or (await self._exists(p))):
+                    # should only check once, for the root
+                    out.add(p)
+        if not out:
+            raise FileNotFoundError(path)
+        return sorted(out)
+
+    async def _mkdir(self, path, create_parents=True, **kwargs):
+        pass  # not necessary to implement, may not have directories
+
+    async def _makedirs(self, path, exist_ok=False):
+        pass  # not necessary to implement, may not have directories
+
+    async def open_async(self, path, mode="rb", **kwargs):
+        if "b" not in mode or kwargs.get("compression"):
+            raise ValueError
+        raise NotImplementedError
+
+
+def mirror_sync_methods(obj):
+    """Populate sync and async methods for obj
+
+    For each method will create a sync version if the name refers to an async method
+    (coroutine) and there is no override in the child class; will create an async
+    method for the corresponding sync method if there is no implementation.
+
+    Uses the methods specified in
+    - async_methods: the set that an implementation is expected to provide
+    - default_async_methods: that can be derived from their sync version in
+      AbstractFileSystem
+    - AsyncFileSystem: async-specific default coroutines
+    """
+    from fsspec import AbstractFileSystem
+
+    for method in async_methods + dir(AsyncFileSystem):
+        if not method.startswith("_"):
+            continue
+        smethod = method[1:]
+        if private.match(method):
+            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
+            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
+            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
+            if isco and is_default:
+                mth = sync_wrapper(getattr(obj, method), obj=obj)
+                setattr(obj, smethod, mth)
+                if not mth.__doc__:
+                    mth.__doc__ = getattr(
+                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
+                    )
+
+
+class FSSpecCoroutineCancel(Exception):
+    pass
+
+
+def _dump_running_tasks(
+    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
+):
+    import traceback
+
+    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
+    if printout:
+        [task.print_stack() for task in tasks]
+    out = [
+        {
+            "locals": task._coro.cr_frame.f_locals,
+            "file": task._coro.cr_frame.f_code.co_filename,
+            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
+            "linelo": task._coro.cr_frame.f_lineno,
+            "stack": traceback.format_stack(task._coro.cr_frame),
+            "task": task if with_task else None,
+        }
+        for task in tasks
+    ]
+    if cancel:
+        for t in tasks:
+            cbs = t._callbacks
+            t.cancel()
+            asyncio.futures.Future.set_exception(t, exc)
+            asyncio.futures.Future.cancel(t)
+            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
+            try:
+                t._coro.throw(exc)  # exits coro, unless explicitly handled
+            except exc:
+                pass
+    return out
+
+
+class AbstractAsyncStreamedFile(AbstractBufferedFile):
+    # no read buffering, and always auto-commit
+    # TODO: readahead might still be useful here, but needs async version
+
+    async def read(self, length=-1):
+        """
+        Return data from cache, or fetch pieces as necessary
+
+        Parameters
+        ----------
+        length: int (-1)
+            Number of bytes to read; if <0, all remaining bytes.
+        """
+        length = -1 if length is None else int(length)
+        if self.mode != "rb":
+            raise ValueError("File not in read mode")
+        if length < 0:
+            length = self.size - self.loc
+        if self.closed:
+            raise ValueError("I/O operation on closed file.")
+        if length == 0:
+            # don't even bother calling fetch
+            return b""
+        out = await self._fetch_range(self.loc, self.loc + length)
+        self.loc += len(out)
+        return out
+
+    async def write(self, data):
+        """
+        Write data to buffer.
+
+        Buffer only sent on flush() or if buffer is greater than
+        or equal to blocksize.
+
+        Parameters
+        ----------
+        data: bytes
+            Set of bytes to be written.
+        """
+        if self.mode not in {"wb", "ab"}:
+            raise ValueError("File not in write mode")
+        if self.closed:
+            raise ValueError("I/O operation on closed file.")
+        if self.forced:
+            raise ValueError("This file has been force-flushed, can only close")
+        out = self.buffer.write(data)
+        self.loc += out
+        if self.buffer.tell() >= self.blocksize:
+            await self.flush()
+        return out
+
+    async def close(self):
+        """Close file
+
+        Finalizes writes, discards cache
+        """
+        if getattr(self, "_unclosable", False):
+            return
+        if self.closed:
+            return
+        if self.mode == "rb":
+            self.cache = None
+        else:
+            if not self.forced:
+                await self.flush(force=True)
+
+            if self.fs is not None:
+                self.fs.invalidate_cache(self.path)
+                self.fs.invalidate_cache(self.fs._parent(self.path))
+
+        self.closed = True
+
+    async def flush(self, force=False):
+        if self.closed:
+            raise ValueError("Flush on closed file")
+        if force and self.forced:
+            raise ValueError("Force flush cannot be called more than once")
+        if force:
+            self.forced = True
+
+        if self.mode not in {"wb", "ab"}:
+            # no-op to flush on read-mode
+            return
+
+        if not force and self.buffer.tell() < self.blocksize:
+            # Defer write on small block
+            return
+
+        if self.offset is None:
+            # Initialize a multipart upload
+            self.offset = 0
+            try:
+                await self._initiate_upload()
+            except:  # noqa: E722
+                self.closed = True
+                raise
+
+        if await self._upload_chunk(final=force) is not False:
+            self.offset += self.buffer.seek(0, 2)
+            self.buffer = io.BytesIO()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.close()
+
+    async def _fetch_range(self, start, end):
+        raise NotImplementedError
+
+    async def _initiate_upload(self):
+        pass
+
+    async def _upload_chunk(self, final=False):
+        raise NotImplementedError
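
The core bridging trick in this module is sync(): work is submitted with asyncio.run_coroutine_threadsafe to the dedicated event loop running on the fsspecIO thread, while the calling thread blocks on a threading.Event. A toy demonstration with a stand-in coroutine rather than a real backend method:

import asyncio

from fsspec.asyn import get_loop, sync

async def add(a, b):  # stands in for a backend coroutine such as _cat_file
    await asyncio.sleep(0)
    return a + b

# Runs the coroutine on the dedicated loop/thread and blocks until done.
print(sync(get_loop(), add, 1, 2))  # 3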
llmeval-env/lib/python3.10/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import wraps
2
+
3
+
4
+ class Callback:
5
+ """
6
+ Base class and interface for callback mechanism
7
+
8
+ This class can be used directly for monitoring file transfers by
9
+ providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
10
+ below), or subclassed for more specialised behaviour.
11
+
12
+ Parameters
13
+ ----------
14
+ size: int (optional)
15
+ Nominal quantity for the value that corresponds to a complete
16
+ transfer, e.g., total number of tiles or total number of
17
+ bytes
18
+ value: int (0)
19
+ Starting internal counter value
20
+ hooks: dict or None
21
+ A dict of named functions to be called on each update. The signature
22
+ of these must be ``f(size, value, **kwargs)``
23
+ """
24
+
25
+ def __init__(self, size=None, value=0, hooks=None, **kwargs):
26
+ self.size = size
27
+ self.value = value
28
+ self.hooks = hooks or {}
29
+ self.kw = kwargs
30
+
31
+ def __enter__(self):
32
+ return self
33
+
34
+ def __exit__(self, *exc_args):
35
+ self.close()
36
+
37
+ def close(self):
38
+ """Close callback."""
39
+
40
+ def branched(self, path_1, path_2, **kwargs):
41
+ """
42
+ Return callback for child transfers
43
+
44
+ If this callback is operating at a higher level, e.g., put, which may
45
+ trigger transfers that can also be monitored. The function returns a callback
46
+ that has to be passed to the child method, e.g., put_file,
47
+ as `callback=` argument.
48
+
49
+ The implementation uses `callback.branch` for compatibility.
50
+ When implementing callbacks, it is recommended to override this function instead
51
+ of `branch` and avoid calling `super().branched(...)`.
52
+
53
+ Prefer using this function over `branch`.
54
+
55
+ Parameters
56
+ ----------
57
+ path_1: str
58
+ Child's source path
59
+ path_2: str
60
+ Child's destination path
61
+ **kwargs:
62
+ Arbitrary keyword arguments
63
+
64
+ Returns
65
+ -------
66
+ callback: Callback
67
+ A callback instance to be passed to the child method
68
+ """
69
+ self.branch(path_1, path_2, kwargs)
70
+ # mutate kwargs so that we can force the caller to pass "callback=" explicitly
71
+ return kwargs.pop("callback", DEFAULT_CALLBACK)
72
+
73
+ def branch_coro(self, fn):
74
+ """
75
+ Wraps a coroutine, and pass a new child callback to it.
76
+ """
77
+
78
+ @wraps(fn)
79
+ async def func(path1, path2: str, **kwargs):
80
+ with self.branched(path1, path2, **kwargs) as child:
81
+ return await fn(path1, path2, callback=child, **kwargs)
82
+
83
+ return func
84
+
85
+ def set_size(self, size):
86
+ """
87
+ Set the internal maximum size attribute
88
+
89
+ Usually called if not initially set at instantiation. Note that this
90
+ triggers a ``call()``.
91
+
92
+ Parameters
93
+ ----------
94
+ size: int
95
+ """
96
+ self.size = size
97
+ self.call()
98
+
99
+ def absolute_update(self, value):
100
+ """
101
+ Set the internal value state
102
+
103
+ Triggers ``call()``
104
+
105
+ Parameters
106
+ ----------
107
+ value: int
108
+ """
109
+ self.value = value
110
+ self.call()
111
+
112
+ def relative_update(self, inc=1):
113
+ """
114
+ Delta increment the internal counter
115
+
116
+ Triggers ``call()``
117
+
118
+ Parameters
119
+ ----------
120
+ inc: int
121
+ """
122
+ self.value += inc
123
+ self.call()
124
+
125
+    def call(self, hook_name=None, **kwargs):
+        """
+        Execute hook(s) with current state
+
+        Each function is passed the internal size and current value
+
+        Parameters
+        ----------
+        hook_name: str or None
+            If given, execute on this hook
+        kwargs: passed on to (all) hook(s)
+        """
+        if not self.hooks:
+            return
+        kw = self.kw.copy()
+        kw.update(kwargs)
+        if hook_name:
+            if hook_name not in self.hooks:
+                return
+            return self.hooks[hook_name](self.size, self.value, **kw)
+        for hook in self.hooks.values() or []:
+            hook(self.size, self.value, **kw)
+
+    def wrap(self, iterable):
+        """
+        Wrap an iterable to call ``relative_update`` on each iteration
+
+        Parameters
+        ----------
+        iterable: Iterable
+            The iterable that is being wrapped
+        """
+        for item in iterable:
+            self.relative_update()
+            yield item
+
+    def branch(self, path_1, path_2, kwargs):
+        """
+        Set callbacks for child transfers
+
+        This is used when a callback is operating at a higher level, e.g.,
+        put, which may trigger transfers that can also be monitored. The
+        passed kwargs are to be *mutated* to add ``callback=``, if this class
+        supports branching to children.
+
+        Parameters
+        ----------
+        path_1: str
+            Child's source path
+        path_2: str
+            Child's destination path
+        kwargs: dict
+            arguments passed to child method, e.g., put_file.
+
+        Returns
+        -------
+
+        """
+        return None
+
+    def no_op(self, *_, **__):
+        pass
+
+    def __getattr__(self, item):
+        """
+        If undefined methods are called on this class, nothing happens
+        """
+        return self.no_op
+
+    @classmethod
+    def as_callback(cls, maybe_callback=None):
+        """Transform callback=... into Callback instance
+
+        For the special value of ``None``, return the global instance of
+        ``NoOpCallback``. This is an alternative to including
+        ``callback=DEFAULT_CALLBACK`` directly in a method signature.
+        """
+        if maybe_callback is None:
+            return DEFAULT_CALLBACK
+        return maybe_callback
+
+
+class NoOpCallback(Callback):
+    """
+    This implementation of Callback does exactly nothing
+    """
+
+    def call(self, *args, **kwargs):
+        return None
+
+
+class DotPrinterCallback(Callback):
+    """
+    Simple example Callback implementation
+
+    Almost identical to Callback with a hook that prints a char; here we
+    demonstrate how the outer layer may print "#" and the inner layer "."
+    """
+
+    def __init__(self, chr_to_print="#", **kwargs):
+        self.chr = chr_to_print
+        super().__init__(**kwargs)
+
+    def branch(self, path_1, path_2, kwargs):
+        """Mutate kwargs to add new instance with different print char"""
+        kwargs["callback"] = DotPrinterCallback(".")
+
+    def call(self, **kwargs):
+        """Just outputs a character"""
+        print(self.chr, end="")
+
+
+class TqdmCallback(Callback):
+    """
+    A callback to display a progress bar using tqdm
+
+    Parameters
+    ----------
+    tqdm_kwargs : dict, (optional)
+        Any argument accepted by the tqdm constructor.
+        See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
+        Will be forwarded to `tqdm_cls`.
+    tqdm_cls: (optional)
+        subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.
+
+    Examples
+    --------
+    >>> import fsspec
+    >>> from fsspec.callbacks import TqdmCallback
+    >>> fs = fsspec.filesystem("memory")
+    >>> path2distant_data = "/your-path"
+    >>> fs.upload(
+            ".",
+            path2distant_data,
+            recursive=True,
+            callback=TqdmCallback(),
+        )
+
+    You can forward args to tqdm using the ``tqdm_kwargs`` parameter.
+
+    >>> fs.upload(
+            ".",
+            path2distant_data,
+            recursive=True,
+            callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
+        )
+
+    You can also customize the progress bar by passing a subclass of `tqdm`.
+
+    .. code-block:: python
+
+        class TqdmFormat(tqdm):
+            '''Provides a `total_time` format parameter'''
+            @property
+            def format_dict(self):
+                d = super().format_dict
+                total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
+                d.update(total_time=self.format_interval(total_time) + " in total")
+                return d
+
+    >>> with TqdmCallback(
+            tqdm_kwargs={
+                "desc": "desc",
+                "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
+            },
+            tqdm_cls=TqdmFormat,
+        ) as callback:
+            fs.upload(".", path2distant_data, recursive=True, callback=callback)
+    """
+
+    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
+        try:
+            from tqdm import tqdm
+
+        except ImportError as exce:
+            raise ImportError(
+                "Using TqdmCallback requires tqdm to be installed"
+            ) from exce
+
+        self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
+        self._tqdm_kwargs = tqdm_kwargs or {}
+        self.tqdm = None
+        super().__init__(*args, **kwargs)
+
+    def call(self, *args, **kwargs):
+        if self.tqdm is None:
+            self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
+        self.tqdm.total = self.size
+        self.tqdm.update(self.value - self.tqdm.n)
+
+    def close(self):
+        if self.tqdm is not None:
+            self.tqdm.close()
+            self.tqdm = None
+
+    def __del__(self):
+        return self.close()
+
+
+DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
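
For orientation, a minimal sketch of the callback machinery above; the paths are illustrative, and the last line assumes tqdm is installed:

    import fsspec
    from fsspec.callbacks import Callback, TqdmCallback

    # wrap() calls relative_update() once per yielded item
    cb = Callback(size=3)
    for _ in cb.wrap([1, 2, 3]):
        pass
    assert cb.value == 3

    # the same events can drive a progress bar instead
    fs = fsspec.filesystem("memory")
    fs.pipe("/src/a", b"data")
    fs.get("/src/a", "/tmp/a", callback=TqdmCallback())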
llmeval-env/lib/python3.10/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,174 @@
+"""Helper functions for a standard streaming compression API"""
+from zipfile import ZipFile
+
+import fsspec.utils
+from fsspec.spec import AbstractBufferedFile
+
+
+def noop_file(file, mode, **kwargs):
+    return file
+
+
+# TODO: files should also be available as contexts
+# should be functions of the form func(infile, mode=, **kwargs) -> file-like
+compr = {None: noop_file}
+
+
+def register_compression(name, callback, extensions, force=False):
+    """Register an "inferable" file compression type.
+
+    Registers transparent file compression type for use with fsspec.open.
+    Compression can be specified by name in open, or "infer"-ed for any files
+    ending with the given extensions.
+
+    Args:
+        name: (str) The compression type name. Eg. "gzip".
+        callback: A callable of form (infile, mode, **kwargs) -> file-like.
+            Accepts an input file-like object, the target mode and kwargs.
+            Returns a wrapped file-like object.
+        extensions: (str, Iterable[str]) A file extension, or list of file
+            extensions for which to infer this compression scheme. Eg. "gz".
+        force: (bool) Force re-registration of compression type or extensions.
+
+    Raises:
+        ValueError: If name or extensions already registered, and not force.
+
+    """
+    if isinstance(extensions, str):
+        extensions = [extensions]
+
+    # Validate registration
+    if name in compr and not force:
+        raise ValueError(f"Duplicate compression registration: {name}")
+
+    for ext in extensions:
+        if ext in fsspec.utils.compressions and not force:
+            raise ValueError(f"Duplicate compression file extension: {ext} ({name})")
+
+    compr[name] = callback
+
+    for ext in extensions:
+        fsspec.utils.compressions[ext] = name
+
+
+def unzip(infile, mode="rb", filename=None, **kwargs):
+    if "r" not in mode:
+        filename = filename or "file"
+        z = ZipFile(infile, mode="w", **kwargs)
+        fo = z.open(filename, mode="w")
+        fo.close = lambda closer=fo.close: closer() or z.close()
+        return fo
+    z = ZipFile(infile)
+    if filename is None:
+        filename = z.namelist()[0]
+    return z.open(filename, mode="r", **kwargs)
+
+
+register_compression("zip", unzip, "zip")
+
+try:
+    from bz2 import BZ2File
+except ImportError:
+    pass
+else:
+    register_compression("bz2", BZ2File, "bz2")
+
+try:  # pragma: no cover
+    from isal import igzip
+
+    def isal(infile, mode="rb", **kwargs):
+        return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
+
+    register_compression("gzip", isal, "gz")
+except ImportError:
+    from gzip import GzipFile
+
+    register_compression(
+        "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
+    )
+
+try:
+    from lzma import LZMAFile
+
+    register_compression("lzma", LZMAFile, "lzma")
+    register_compression("xz", LZMAFile, "xz")
+except ImportError:
+    pass
+
+try:
+    import lzmaffi
+
+    register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
+    register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
+except ImportError:
+    pass
+
+
+class SnappyFile(AbstractBufferedFile):
+    def __init__(self, infile, mode, **kwargs):
+        import snappy
+
+        super().__init__(
+            fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
+        )
+        self.infile = infile
+        if "r" in mode:
+            self.codec = snappy.StreamDecompressor()
+        else:
+            self.codec = snappy.StreamCompressor()
+
+    def _upload_chunk(self, final=False):
+        self.buffer.seek(0)
+        out = self.codec.add_chunk(self.buffer.read())
+        self.infile.write(out)
+        return True
+
+    def seek(self, loc, whence=0):
+        raise NotImplementedError("SnappyFile is not seekable")
+
+    def seekable(self):
+        return False
+
+    def _fetch_range(self, start, end):
+        """Get the specified set of bytes from remote"""
+        data = self.infile.read(end - start)
+        return self.codec.decompress(data)
+
+
+try:
+    import snappy
+
+    snappy.compress
+    # Snappy may use the .sz file extension, but this is not part of the
+    # standard implementation.
+    register_compression("snappy", SnappyFile, [])
+
+except (ImportError, NameError, AttributeError):
+    pass
+
+try:
+    import lz4.frame
+
+    register_compression("lz4", lz4.frame.open, "lz4")
+except ImportError:
+    pass
+
+try:
+    import zstandard as zstd
+
+    def zstandard_file(infile, mode="rb"):
+        if "r" in mode:
+            cctx = zstd.ZstdDecompressor()
+            return cctx.stream_reader(infile)
+        else:
+            cctx = zstd.ZstdCompressor(level=10)
+            return cctx.stream_writer(infile)
+
+    register_compression("zstd", zstandard_file, "zst")
+except ImportError:
+    pass
+
+
+def available_compressions():
+    """Return a list of the implemented compressions."""
+    return list(compr)
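
A small sketch of the registry above in action; the "identity" codec and the ".ident" extension are made up for illustration:

    import fsspec
    from fsspec.compression import available_compressions, register_compression

    # a pass-through "codec" under an invented name and file extension
    register_compression("identity", lambda f, mode="rb", **kw: f, "ident")
    assert "identity" in available_compressions()

    # "infer" resolves the codec from the ".ident" suffix
    with fsspec.open("memory://file.ident", "wb", compression="infer") as f:
        f.write(b"hello")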
llmeval-env/lib/python3.10/site-packages/fsspec/config.py ADDED
@@ -0,0 +1,131 @@
+from __future__ import annotations
+
+import configparser
+import json
+import os
+import warnings
+from typing import Any
+
+conf: dict[str, dict[str, Any]] = {}
+default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
+conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
+
+
+def set_conf_env(conf_dict, envdict=os.environ):
+    """Set config values from environment variables
+
+    Looks for variables of the form ``FSSPEC_<protocol>`` and
+    ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
+    as a json dictionary and used to ``update`` the config of the
+    corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
+    attempt to convert the string value, but the kwarg keys will be lower-cased.
+
+    The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
+    ``FSSPEC_<protocol>`` ones.
+
+    Parameters
+    ----------
+    conf_dict : dict(str, dict)
+        This dict will be mutated
+    envdict : dict-like(str, str)
+        Source for the values - usually the real environment
+    """
+    kwarg_keys = []
+    for key in envdict:
+        if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
+            if key.count("_") > 1:
+                kwarg_keys.append(key)
+                continue
+            try:
+                value = json.loads(envdict[key])
+            except json.decoder.JSONDecodeError as ex:
+                warnings.warn(
+                    f"Ignoring environment variable {key} due to a parse failure: {ex}"
+                )
+            else:
+                if isinstance(value, dict):
+                    _, proto = key.split("_", 1)
+                    conf_dict.setdefault(proto.lower(), {}).update(value)
+                else:
+                    warnings.warn(
+                        f"Ignoring environment variable {key} due to not being a dict:"
+                        f" {type(value)}"
+                    )
+        elif key.startswith("FSSPEC"):
+            warnings.warn(
+                f"Ignoring environment variable {key} due to having an unexpected name"
+            )
+
+    for key in kwarg_keys:
+        _, proto, kwarg = key.split("_", 2)
+        conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
+
+
+def set_conf_files(cdir, conf_dict):
+    """Set config values from files
+
+    Scans for INI and JSON files in the given directory, and uses their
+    contents to set the config. In case of repeated values, later values
+    win.
+
+    In the case of INI files, all values are strings, and these will not
+    be converted.
+
+    Parameters
+    ----------
+    cdir : str
+        Directory to search
+    conf_dict : dict(str, dict)
+        This dict will be mutated
+    """
+    if not os.path.isdir(cdir):
+        return
+    allfiles = sorted(os.listdir(cdir))
+    for fn in allfiles:
+        if fn.endswith(".ini"):
+            ini = configparser.ConfigParser()
+            ini.read(os.path.join(cdir, fn))
+            for key in ini:
+                if key == "DEFAULT":
+                    continue
+                conf_dict.setdefault(key, {}).update(dict(ini[key]))
+        if fn.endswith(".json"):
+            with open(os.path.join(cdir, fn)) as f:
+                js = json.load(f)
+            for key in js:
+                conf_dict.setdefault(key, {}).update(dict(js[key]))
+
+
+def apply_config(cls, kwargs, conf_dict=None):
+    """Supply default values for kwargs when instantiating class
+
+    Augments the passed kwargs, by finding entries in the config dict
+    which match the class's ``.protocol`` attribute (one or more str)
+
+    Parameters
+    ----------
+    cls : file system implementation
+    kwargs : dict
+    conf_dict : dict of dict
+        Typically this is the global configuration
+
+    Returns
+    -------
+    dict : the modified set of kwargs
+    """
+    if conf_dict is None:
+        conf_dict = conf
+    protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
+    kw = {}
+    for proto in protos:
+        # default kwargs from the current state of the config
+        if proto in conf_dict:
+            kw.update(conf_dict[proto])
+    # explicit kwargs always win
+    kw.update(**kwargs)
+    kwargs = kw
+    return kwargs
+
+
+set_conf_files(conf_dir, conf)
+set_conf_env(conf)
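
An illustrative round-trip through ``set_conf_env`` above; the variable names and values are arbitrary:

    from fsspec.config import set_conf_env

    env = {
        "FSSPEC_MEMORY": '{"foo": 1}',   # JSON dict, parsed and merged
        "FSSPEC_MEMORY_BAR": "baz",      # single kwarg, kept as a string
    }
    cfg = {}
    set_conf_env(cfg, envdict=env)
    assert cfg == {"memory": {"foo": 1, "bar": "baz"}}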
llmeval-env/lib/python3.10/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
+import os
+import shutil
+import subprocess
+import sys
+import time
+
+import pytest
+
+import fsspec
+from fsspec.implementations.cached import CachingFileSystem
+
+
+@pytest.fixture()
+def m():
+    """
+    Fixture providing a memory filesystem.
+    """
+    m = fsspec.filesystem("memory")
+    m.store.clear()
+    m.pseudo_dirs.clear()
+    m.pseudo_dirs.append("")
+    try:
+        yield m
+    finally:
+        m.store.clear()
+        m.pseudo_dirs.clear()
+        m.pseudo_dirs.append("")
+
+
+@pytest.fixture
+def ftp_writable(tmpdir):
+    """
+    Fixture providing a writable FTP filesystem.
+    """
+    pytest.importorskip("pyftpdlib")
+    from fsspec.implementations.ftp import FTPFileSystem
+
+    FTPFileSystem.clear_instance_cache()  # remove lingering connections
+    CachingFileSystem.clear_instance_cache()
+    d = str(tmpdir)
+    with open(os.path.join(d, "out"), "wb") as f:
+        f.write(b"hello" * 10000)
+    P = subprocess.Popen(
+        [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
+    )
+    try:
+        time.sleep(1)
+        yield "localhost", 2121, "user", "pass"
+    finally:
+        P.terminate()
+        P.wait()
+        try:
+            shutil.rmtree(tmpdir)
+        except Exception:
+            pass
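
A sketch of a test consuming the ``m`` fixture above; the test name and paths are invented:

    def test_roundtrip(m):
        m.pipe("/data/x", b"payload")          # write into the memory filesystem
        assert m.cat("/data/x") == b"payload"  # read it back; the fixture cleans up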
llmeval-env/lib/python3.10/site-packages/fsspec/core.py ADDED
@@ -0,0 +1,714 @@
+from __future__ import annotations
+
+import io
+import logging
+import os
+import re
+from glob import has_magic
+from pathlib import Path
+
+# for backwards compat, we export cache things from here too
+from .caching import (  # noqa: F401
+    BaseCache,
+    BlockCache,
+    BytesCache,
+    MMapCache,
+    ReadAheadCache,
+    caches,
+)
+from .compression import compr
+from .registry import filesystem, get_filesystem_class
+from .utils import (
+    _unstrip_protocol,
+    build_name_function,
+    infer_compression,
+    stringify_path,
+)
+
+logger = logging.getLogger("fsspec")
+
+
+class OpenFile:
+    """
+    File-like object to be used in a context
+
+    Can layer (buffered) text-mode and compression over any file-system, which
+    are typically binary-only.
+
+    These instances are safe to serialize, as the low-level file object
+    is not created until invoked using ``with``.
+
+    Parameters
+    ----------
+    fs: FileSystem
+        The file system to use for opening the file. Should be a subclass of,
+        or duck-type compatible with, ``fsspec.spec.AbstractFileSystem``
+    path: str
+        Location to open
+    mode: str like 'rb', optional
+        Mode of the opened file
+    compression: str or None, optional
+        Compression to apply
+    encoding: str or None, optional
+        The encoding to use if opened in text mode.
+    errors: str or None, optional
+        How to handle encoding errors if opened in text mode.
+    newline: None or str
+        Passed to TextIOWrapper in text mode, how to handle line endings.
+    autoopen: bool
+        If True, calls open() immediately. Mostly used by pickle
+    pos: int
+        If given and autoopen is True, seek to this location immediately
+    """
+
+    def __init__(
+        self,
+        fs,
+        path,
+        mode="rb",
+        compression=None,
+        encoding=None,
+        errors=None,
+        newline=None,
+    ):
+        self.fs = fs
+        self.path = path
+        self.mode = mode
+        self.compression = get_compression(path, compression)
+        self.encoding = encoding
+        self.errors = errors
+        self.newline = newline
+        self.fobjects = []
+
+    def __reduce__(self):
+        return (
+            OpenFile,
+            (
+                self.fs,
+                self.path,
+                self.mode,
+                self.compression,
+                self.encoding,
+                self.errors,
+                self.newline,
+            ),
+        )
+
+    def __repr__(self):
+        return f"<OpenFile '{self.path}'>"
+
+    def __enter__(self):
+        mode = self.mode.replace("t", "").replace("b", "") + "b"
+
+        f = self.fs.open(self.path, mode=mode)
+
+        self.fobjects = [f]
+
+        if self.compression is not None:
+            compress = compr[self.compression]
+            f = compress(f, mode=mode[0])
+            self.fobjects.append(f)
+
+        if "b" not in self.mode:
+            # assume, for example, that 'r' is equivalent to 'rt' as in builtin
+            f = PickleableTextIOWrapper(
+                f, encoding=self.encoding, errors=self.errors, newline=self.newline
+            )
+            self.fobjects.append(f)
+
+        return self.fobjects[-1]
+
+    def __exit__(self, *args):
+        self.close()
+
+    @property
+    def full_name(self):
+        return _unstrip_protocol(self.path, self.fs)
+
+    def open(self):
+        """Materialise this as a real open file without context
+
+        The OpenFile object should be explicitly closed to avoid enclosed file
+        instances persisting. You must, therefore, keep a reference to the OpenFile
+        during the life of the file-like it generates.
+        """
+        return self.__enter__()
+
+    def close(self):
+        """Close all encapsulated file objects"""
+        for f in reversed(self.fobjects):
+            if "r" not in self.mode and not f.closed:
+                f.flush()
+            f.close()
+        self.fobjects.clear()
+
+
+class OpenFiles(list):
+    """List of OpenFile instances
+
+    Can be used in a single context, which opens and closes all of the
+    contained files. Normal list access to get the elements works as
+    normal.
+
+    A special case is made for caching filesystems - the files will
+    be down/uploaded together at the start or end of the context, and
+    this may happen concurrently, if the target filesystem supports it.
+    """
+
+    def __init__(self, *args, mode="rb", fs=None):
+        self.mode = mode
+        self.fs = fs
+        self.files = []
+        super().__init__(*args)
+
+    def __enter__(self):
+        if self.fs is None:
+            raise ValueError("Context has already been used")
+
+        fs = self.fs
+        while True:
+            if hasattr(fs, "open_many"):
+                # check for concurrent cache download; or set up for upload
+                self.files = fs.open_many(self)
+                return self.files
+            if hasattr(fs, "fs") and fs.fs is not None:
+                fs = fs.fs
+            else:
+                break
+        return [s.__enter__() for s in self]
+
+    def __exit__(self, *args):
+        fs = self.fs
+        [s.__exit__(*args) for s in self]
+        if "r" not in self.mode:
+            while True:
+                if hasattr(fs, "open_many"):
+                    # check for concurrent cache upload
+                    fs.commit_many(self.files)
+                    return
+                if hasattr(fs, "fs") and fs.fs is not None:
+                    fs = fs.fs
+                else:
+                    break
+
+    def __getitem__(self, item):
+        out = super().__getitem__(item)
+        if isinstance(item, slice):
+            return OpenFiles(out, mode=self.mode, fs=self.fs)
+        return out
+
+    def __repr__(self):
+        return f"<List of {len(self)} OpenFile instances>"
+
+
+def open_files(
+    urlpath,
+    mode="rb",
+    compression=None,
+    encoding="utf8",
+    errors=None,
+    name_function=None,
+    num=1,
+    protocol=None,
+    newline=None,
+    auto_mkdir=True,
+    expand=True,
+    **kwargs,
+):
+    """Given a path or paths, return a list of ``OpenFile`` objects.
+
+    For writing, a str path must contain the "*" character, which will be filled
+    in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
+
+    For either reading or writing, can instead provide explicit list of paths.
+
+    Parameters
+    ----------
+    urlpath: string or list
+        Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
+        to read from alternative filesystems. To read from multiple files you
+        can pass a globstring or a list of paths, with the caveat that they
+        must all have the same protocol.
+    mode: 'rb', 'wt', etc.
+    compression: string or None
+        If given, open file using compression codec. Can either be a compression
+        name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+        compression from the filename suffix.
+    encoding: str
+        For text mode only
+    errors: None or str
+        Passed to TextIOWrapper in text mode
+    name_function: function or None
+        if opening a set of files for writing, those files do not yet exist,
+        so we need to generate their names by formatting the urlpath for
+        each sequence number
+    num: int [1]
+        if writing mode, number of files we expect to create (passed to
+        name_function)
+    protocol: str or None
+        If given, overrides the protocol found in the URL.
+    newline: bytes or None
+        Used for line terminator in text mode. If None, uses system default;
+        if blank, uses no translation.
+    auto_mkdir: bool (True)
+        If in write mode, this will ensure the target directory exists before
+        writing, by calling ``fs.mkdirs(exist_ok=True)``.
+    expand: bool
+    **kwargs: dict
+        Extra options that make sense to a particular storage connection, e.g.
+        host, port, username, password, etc.
+
+    Examples
+    --------
+    >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
+    >>> files = open_files(
+    ...     's3://bucket/2015-*-*.csv.gz', compression='gzip'
+    ... )  # doctest: +SKIP
+
+    Returns
+    -------
+    An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
+    be used as a single context
+
+    Notes
+    -----
+    For a full list of the available protocols and the implementations that
+    they map across to see the latest online documentation:
+
+    - For implementations built into ``fsspec`` see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+    - For implementations in separate packages see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
+    """
+    fs, fs_token, paths = get_fs_token_paths(
+        urlpath,
+        mode,
+        num=num,
+        name_function=name_function,
+        storage_options=kwargs,
+        protocol=protocol,
+        expand=expand,
+    )
+    if fs.protocol == "file":
+        fs.auto_mkdir = auto_mkdir
+    elif "r" not in mode and auto_mkdir:
+        parents = {fs._parent(path) for path in paths}
+        for parent in parents:
+            try:
+                fs.makedirs(parent, exist_ok=True)
+            except PermissionError:
+                pass
+    return OpenFiles(
+        [
+            OpenFile(
+                fs,
+                path,
+                mode=mode,
+                compression=compression,
+                encoding=encoding,
+                errors=errors,
+                newline=newline,
+            )
+            for path in paths
+        ],
+        mode=mode,
+        fs=fs,
+    )
+
+
+def _un_chain(path, kwargs):
+    x = re.compile(".*[^a-z]+.*")  # test for non protocol-like single word
+    bits = (
+        [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
+        if "::" in path
+        else [path]
+    )
+    # [[url, protocol, kwargs], ...]
+    out = []
+    previous_bit = None
+    kwargs = kwargs.copy()
+    for bit in reversed(bits):
+        protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
+        cls = get_filesystem_class(protocol)
+        extra_kwargs = cls._get_kwargs_from_urls(bit)
+        kws = kwargs.pop(protocol, {})
+        if bit is bits[0]:
+            kws.update(kwargs)
+        kw = dict(**extra_kwargs, **kws)
+        bit = cls._strip_protocol(bit)
+        if (
+            protocol in {"blockcache", "filecache", "simplecache"}
+            and "target_protocol" not in kw
+        ):
+            bit = previous_bit
+        out.append((bit, protocol, kw))
+        previous_bit = bit
+    out.reverse()
+    return out
+
+
+def url_to_fs(url, **kwargs):
+    """
+    Turn fully-qualified and potentially chained URL into filesystem instance
+
+    Parameters
+    ----------
+    url : str
+        The fsspec-compatible URL
+    **kwargs: dict
+        Extra options that make sense to a particular storage connection, e.g.
+        host, port, username, password, etc.
+
+    Returns
+    -------
+    filesystem : FileSystem
+        The new filesystem discovered from ``url`` and created with
+        ``**kwargs``.
+    urlpath : str
+        The file-systems-specific URL for ``url``.
+    """
+    # non-FS arguments that appear in fsspec.open()
+    # inspect could keep this in sync with open()'s signature
+    known_kwargs = {
+        "compression",
+        "encoding",
+        "errors",
+        "expand",
+        "mode",
+        "name_function",
+        "newline",
+        "num",
+    }
+    kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
+    chain = _un_chain(url, kwargs)
+    inkwargs = {}
+    # Reverse iterate the chain, creating a nested target_* structure
+    for i, ch in enumerate(reversed(chain)):
+        urls, protocol, kw = ch
+        if i == len(chain) - 1:
+            inkwargs = dict(**kw, **inkwargs)
+            continue
+        inkwargs["target_options"] = dict(**kw, **inkwargs)
+        inkwargs["target_protocol"] = protocol
+        inkwargs["fo"] = urls
+    urlpath, protocol, _ = chain[0]
+    fs = filesystem(protocol, **inkwargs)
+    return fs, urlpath
+
+
+def open(
+    urlpath,
+    mode="rb",
+    compression=None,
+    encoding="utf8",
+    errors=None,
+    protocol=None,
+    newline=None,
+    **kwargs,
+):
+    """Given a path or paths, return one ``OpenFile`` object.
+
+    Parameters
+    ----------
+    urlpath: string or list
+        Absolute or relative filepath. Prefix with a protocol like ``s3://``
+        to read from alternative filesystems. Should not include glob
+        character(s).
+    mode: 'rb', 'wt', etc.
+    compression: string or None
+        If given, open file using compression codec. Can either be a compression
+        name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+        compression from the filename suffix.
+    encoding: str
+        For text mode only
+    errors: None or str
+        Passed to TextIOWrapper in text mode
+    protocol: str or None
+        If given, overrides the protocol found in the URL.
+    newline: bytes or None
+        Used for line terminator in text mode. If None, uses system default;
+        if blank, uses no translation.
+    **kwargs: dict
+        Extra options that make sense to a particular storage connection, e.g.
+        host, port, username, password, etc.
+
+    Examples
+    --------
+    >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
+    >>> openfile = open(
+    ...     's3://bucket/2015-01-01.csv.gz', compression='gzip'
+    ... )  # doctest: +SKIP
+    >>> with openfile as f:
+    ...     df = pd.read_csv(f)  # doctest: +SKIP
+    ...
+
+    Returns
+    -------
+    ``OpenFile`` object.
+
+    Notes
+    -----
+    For a full list of the available protocols and the implementations that
+    they map across to see the latest online documentation:
+
+    - For implementations built into ``fsspec`` see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+    - For implementations in separate packages see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
+    """
+    kw = {"expand": False}
+    kw.update(kwargs)
+    out = open_files(
+        urlpath=[urlpath],
+        mode=mode,
+        compression=compression,
+        encoding=encoding,
+        errors=errors,
+        protocol=protocol,
+        newline=newline,
+        **kw,
+    )
+    if not out:
+        raise FileNotFoundError(urlpath)
+    return out[0]
+
+
+def open_local(
+    url: str | list[str] | Path | list[Path],
+    mode: str = "rb",
+    **storage_options: dict,
+) -> str | list[str]:
+    """Open file(s) which can be resolved to local
+
+    For files which either are local, or get downloaded upon open
+    (e.g., by file caching)
+
+    Parameters
+    ----------
+    url: str or list(str)
+    mode: str
+        Must be read mode
+    storage_options:
+        passed on to the FS, or used by open_files (e.g., compression)
+    """
+    if "r" not in mode:
+        raise ValueError("Can only ensure local files when reading")
+    of = open_files(url, mode=mode, **storage_options)
+    if not getattr(of[0].fs, "local_file", False):
+        raise ValueError(
+            "open_local can only be used on a filesystem which"
+            " has attribute local_file=True"
+        )
+    with of as files:
+        paths = [f.name for f in files]
+    if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
+        return paths[0]
+    return paths
+
+
+def get_compression(urlpath, compression):
+    if compression == "infer":
+        compression = infer_compression(urlpath)
+    if compression is not None and compression not in compr:
+        raise ValueError(f"Compression type {compression} not supported")
+    return compression
+
+
+def split_protocol(urlpath):
+    """Return protocol, path pair"""
+    urlpath = stringify_path(urlpath)
+    if "://" in urlpath:
+        protocol, path = urlpath.split("://", 1)
+        if len(protocol) > 1:
+            # excludes Windows paths
+            return protocol, path
+    if urlpath.startswith("data:"):
+        return urlpath.split(":", 1)
+    return None, urlpath
+
+
+def strip_protocol(urlpath):
+    """Return only path part of full URL, according to appropriate backend"""
+    protocol, _ = split_protocol(urlpath)
+    cls = get_filesystem_class(protocol)
+    return cls._strip_protocol(urlpath)
+
+
+def expand_paths_if_needed(paths, mode, num, fs, name_function):
+    """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
+    in them (read mode).
+
+    :param paths: list of paths
+    mode: str
+        Mode in which to open files.
+    num: int
+        If opening in writing mode, number of files we expect to create.
+    fs: filesystem object
+    name_function: callable
+        If opening in writing mode, this callable is used to generate path
+        names. Names are generated for each partition by
+        ``urlpath.replace('*', name_function(partition_index))``.
+    :return: list of paths
+    """
+    expanded_paths = []
+    paths = list(paths)
+
+    if "w" in mode:  # write mode
+        if sum([1 for p in paths if "*" in p]) > 1:
+            raise ValueError(
+                "When writing data, only one filename mask can be specified."
+            )
+        num = max(num, len(paths))
+
+        for curr_path in paths:
+            if "*" in curr_path:
+                # expand using name_function
+                expanded_paths.extend(_expand_paths(curr_path, name_function, num))
+            else:
+                expanded_paths.append(curr_path)
+        # if we generated more paths than asked for, trim the list
+        if len(expanded_paths) > num:
+            expanded_paths = expanded_paths[:num]
+
+    else:  # read mode
+        for curr_path in paths:
+            if has_magic(curr_path):
+                # expand using glob
+                expanded_paths.extend(fs.glob(curr_path))
+            else:
+                expanded_paths.append(curr_path)
+
+    return expanded_paths
+
+
+def get_fs_token_paths(
+    urlpath,
+    mode="rb",
+    num=1,
+    name_function=None,
+    storage_options=None,
+    protocol=None,
+    expand=True,
+):
+    """Filesystem, deterministic token, and paths from a urlpath and options.
+
+    Parameters
+    ----------
+    urlpath: string or iterable
+        Absolute or relative filepath, URL (may include protocols like
+        ``s3://``), or globstring pointing to data.
+    mode: str, optional
+        Mode in which to open files.
+    num: int, optional
+        If opening in writing mode, number of files we expect to create.
+    name_function: callable, optional
+        If opening in writing mode, this callable is used to generate path
+        names. Names are generated for each partition by
+        ``urlpath.replace('*', name_function(partition_index))``.
+    storage_options: dict, optional
+        Additional keywords to pass to the filesystem class.
+    protocol: str or None
+        To override the protocol specifier in the URL
+    expand: bool
+        Expand string paths for writing, assuming the path is a directory
+    """
+    if isinstance(urlpath, (list, tuple, set)):
+        if not urlpath:
+            raise ValueError("empty urlpath sequence")
+        urlpath0 = stringify_path(list(urlpath)[0])
+    else:
+        urlpath0 = stringify_path(urlpath)
+    storage_options = storage_options or {}
+    if protocol:
+        storage_options["protocol"] = protocol
+    chain = _un_chain(urlpath0, storage_options or {})
+    inkwargs = {}
+    # Reverse iterate the chain, creating a nested target_* structure
+    for i, ch in enumerate(reversed(chain)):
+        urls, nested_protocol, kw = ch
+        if i == len(chain) - 1:
+            inkwargs = dict(**kw, **inkwargs)
+            continue
+        inkwargs["target_options"] = dict(**kw, **inkwargs)
+        inkwargs["target_protocol"] = nested_protocol
+        inkwargs["fo"] = urls
+    paths, protocol, _ = chain[0]
+    fs = filesystem(protocol, **inkwargs)
+    if isinstance(urlpath, (list, tuple, set)):
+        pchains = [
+            _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
+        ]
+        if len({pc[1] for pc in pchains}) > 1:
+            raise ValueError(f"Protocol mismatch getting fs from {urlpath}")
+        paths = [pc[0] for pc in pchains]
+    else:
+        paths = fs._strip_protocol(paths)
+    if isinstance(paths, (list, tuple, set)):
+        if expand:
+            paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
+        elif not isinstance(paths, list):
+            paths = list(paths)
+    else:
+        if "w" in mode and expand:
+            paths = _expand_paths(paths, name_function, num)
+        elif "x" in mode and expand:
+            paths = _expand_paths(paths, name_function, num)
+        elif "*" in paths:
+            paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
+        else:
+            paths = [paths]
+
+    return fs, fs._fs_token, paths
+
+
+def _expand_paths(path, name_function, num):
+    if isinstance(path, str):
+        if path.count("*") > 1:
+            raise ValueError("Output path spec must contain exactly one '*'.")
+        elif "*" not in path:
+            path = os.path.join(path, "*.part")
+
+        if name_function is None:
+            name_function = build_name_function(num - 1)
+
+        paths = [path.replace("*", name_function(i)) for i in range(num)]
+        if paths != sorted(paths):
+            logger.warning(
+                "In order to preserve order between partitions"
+                " paths created with ``name_function`` should "
+                "sort to partition order"
+            )
+    elif isinstance(path, (tuple, list)):
+        assert len(path) == num
+        paths = list(path)
+    else:
+        raise ValueError(
+            "Path should be either\n"
+            "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
+            "2. A directory: 'foo/'\n"
+            "3. A path with a '*' in it: 'foo.*.json'"
+        )
+    return paths
+
+
+class PickleableTextIOWrapper(io.TextIOWrapper):
+    """TextIOWrapper cannot be pickled. This solves it.
+
+    Requires that ``buffer`` be pickleable, which all instances of
+    AbstractBufferedFile are.
+    """
+
+    def __init__(
+        self,
+        buffer,
+        encoding=None,
+        errors=None,
+        newline=None,
+        line_buffering=False,
+        write_through=False,
+    ):
+        self.args = buffer, encoding, errors, newline, line_buffering, write_through
+        super().__init__(*self.args)
+
+    def __reduce__(self):
+        return PickleableTextIOWrapper, self.args
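
To make the control flow above concrete, a short sketch using the public helpers; the path is illustrative:

    import fsspec
    from fsspec.core import split_protocol, url_to_fs

    assert split_protocol("s3://bucket/key") == ("s3", "bucket/key")

    # url_to_fs unwinds any "::" chain into nested target_* kwargs
    fs, path = url_to_fs("memory://demo.txt")
    fs.pipe(path, b"abc")

    with fsspec.open("memory://demo.txt", "rt") as f:
        assert f.read() == "abc"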
llmeval-env/lib/python3.10/site-packages/fsspec/exceptions.py ADDED
@@ -0,0 +1,17 @@
+"""
+fsspec user-defined exception classes
+"""
+import asyncio
+
+
+class BlocksizeMismatchError(ValueError):
+    """
+    Raised when a cached file is opened with a different blocksize than it was
+    written with
+    """
+
+
+class FSTimeoutError(asyncio.TimeoutError):
+    """
+    Raised when an fsspec function times out
+    """
llmeval-env/lib/python3.10/site-packages/fsspec/fuse.py ADDED
@@ -0,0 +1,324 @@
+import argparse
+import logging
+import os
+import stat
+import threading
+import time
+from errno import EIO, ENOENT
+
+from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
+
+from fsspec import __version__
+from fsspec.core import url_to_fs
+
+logger = logging.getLogger("fsspec.fuse")
+
+
+class FUSEr(Operations):
+    def __init__(self, fs, path, ready_file=False):
+        self.fs = fs
+        self.cache = {}
+        self.root = path.rstrip("/") + "/"
+        self.counter = 0
+        logger.info("Starting FUSE at %s", path)
+        self._ready_file = ready_file
+
+    def getattr(self, path, fh=None):
+        logger.debug("getattr %s", path)
+        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
+            return {"type": "file", "st_size": 5}
+
+        path = "".join([self.root, path.lstrip("/")]).rstrip("/")
+        try:
+            info = self.fs.info(path)
+        except FileNotFoundError:
+            raise FuseOSError(ENOENT)
+
+        data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
+        perm = info.get("mode", 0o777)
+
+        if info["type"] != "file":
+            data["st_mode"] = stat.S_IFDIR | perm
+            data["st_size"] = 0
+            data["st_blksize"] = 0
+        else:
+            data["st_mode"] = stat.S_IFREG | perm
+            data["st_size"] = info["size"]
+            data["st_blksize"] = 5 * 2**20
+            data["st_nlink"] = 1
+        data["st_atime"] = info["atime"] if "atime" in info else time.time()
+        data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
+        data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
+        return data
+
+    def readdir(self, path, fh):
+        logger.debug("readdir %s", path)
+        path = "".join([self.root, path.lstrip("/")])
+        files = self.fs.ls(path, False)
+        files = [os.path.basename(f.rstrip("/")) for f in files]
+        return [".", ".."] + files
+
+    def mkdir(self, path, mode):
+        path = "".join([self.root, path.lstrip("/")])
+        self.fs.mkdir(path)
+        return 0
+
+    def rmdir(self, path):
+        path = "".join([self.root, path.lstrip("/")])
+        self.fs.rmdir(path)
+        return 0
+
+    def read(self, path, size, offset, fh):
+        logger.debug("read %s", (path, size, offset))
+        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
+            # status indicator
+            return b"ready"
+
+        f = self.cache[fh]
+        f.seek(offset)
+        out = f.read(size)
+        return out
+
+    def write(self, path, data, offset, fh):
+        logger.debug("write %s", (path, offset))
+        f = self.cache[fh]
+        f.seek(offset)
+        f.write(data)
+        return len(data)
+
+    def create(self, path, flags, fi=None):
+        logger.debug("create %s", (path, flags))
+        fn = "".join([self.root, path.lstrip("/")])
+        self.fs.touch(fn)  # OS will want to get attributes immediately
+        f = self.fs.open(fn, "wb")
+        self.cache[self.counter] = f
+        self.counter += 1
+        return self.counter - 1
+
+    def open(self, path, flags):
+        logger.debug("open %s", (path, flags))
+        fn = "".join([self.root, path.lstrip("/")])
+        if flags % 2 == 0:
+            # read
+            mode = "rb"
+        else:
+            # write/create
+            mode = "wb"
+        self.cache[self.counter] = self.fs.open(fn, mode)
+        self.counter += 1
+        return self.counter - 1
+
+    def truncate(self, path, length, fh=None):
+        fn = "".join([self.root, path.lstrip("/")])
+        if length != 0:
+            raise NotImplementedError
+        # maybe should be no-op since open with write sets size to zero anyway
+        self.fs.touch(fn)
+
+    def unlink(self, path):
+        fn = "".join([self.root, path.lstrip("/")])
+        try:
+            self.fs.rm(fn, False)
+        except (OSError, FileNotFoundError):
+            raise FuseOSError(EIO)
+
+    def release(self, path, fh):
+        try:
+            if fh in self.cache:
+                f = self.cache[fh]
+                f.close()
+                self.cache.pop(fh)
+        except Exception as e:
+            print(e)
+        return 0
+
+    def chmod(self, path, mode):
+        if hasattr(self.fs, "chmod"):
+            path = "".join([self.root, path.lstrip("/")])
+            return self.fs.chmod(path, mode)
+        raise NotImplementedError
+
+
+def run(
+    fs,
+    path,
+    mount_point,
+    foreground=True,
+    threads=False,
+    ready_file=False,
+    ops_class=FUSEr,
+):
+    """Mount stuff in a local directory
+
+    This uses fusepy to make it appear as if a given path on an fsspec
+    instance is in fact resident within the local file-system.
+
+    This requires that fusepy be installed, and that FUSE be available on
+    the system (typically requiring a package to be installed with
+    apt, yum, brew, etc.).
+
+    Parameters
+    ----------
+    fs: file-system instance
+        From one of the compatible implementations
+    path: str
+        Location on that file-system to regard as the root directory to
+        mount. Note that you typically should include the terminating "/"
+        character.
+    mount_point: str
+        An empty directory on the local file-system where the contents of
+        the remote path will appear.
+    foreground: bool
+        Whether or not calling this function will block. Operation will
+        typically be more stable if True.
+    threads: bool
+        Whether or not to create threads when responding to file operations
+        within the mounted directory. Operation will typically be more
+        stable if False.
+    ready_file: bool
+        If True, the ``.fuse_ready`` file will exist in the ``mount_point``
+        directory once FUSE is ready; intended for debugging.
+    ops_class: FUSEr or Subclass of FUSEr
+        To override the default behavior of FUSEr. For example, logging
+        to file.
+
+    """
+    func = lambda: FUSE(
+        ops_class(fs, path, ready_file=ready_file),
+        mount_point,
+        nothreads=not threads,
+        foreground=foreground,
+    )
+    if not foreground:
+        th = threading.Thread(target=func)
+        th.daemon = True
+        th.start()
+        return th
+    else:  # pragma: no cover
+        try:
+            func()
+        except KeyboardInterrupt:
+            pass
+
+
+def main(args):
+    """Mount filesystem from chained URL to MOUNT_POINT.
+
+    Examples:
+
+    python3 -m fsspec.fuse memory /usr/share /tmp/mem
+
+    python3 -m fsspec.fuse local /tmp/source /tmp/local \\
+            -l /tmp/fsspecfuse.log
+
+    You can also mount chained-URLs and use special settings:
+
+    python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
+            / /tmp/zip \\
+            -o 'filecache-cache_storage=/tmp/simplecache'
+
+    You can specify the type of the setting by using `[int]` or `[bool]`,
+    (`true`, `yes`, `1` represents the Boolean value `True`):
+
+    python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
+            /historic/packages/RPMS /tmp/ftp \\
+            -o 'simplecache-cache_storage=/tmp/simplecache' \\
+            -o 'simplecache-check_files=false[bool]' \\
+            -o 'ftp-listings_expiry_time=60[int]' \\
+            -o 'ftp-username=anonymous' \\
+            -o 'ftp-password=xieyanbo'
+    """
+
+    class RawDescriptionArgumentParser(argparse.ArgumentParser):
+        def format_help(self):
+            usage = super().format_help()
+            parts = usage.split("\n\n")
+            parts[1] = self.description.rstrip()
+            return "\n\n".join(parts)
+
+    parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
+    parser.add_argument("--version", action="version", version=__version__)
+    parser.add_argument("url", type=str, help="fs url")
+    parser.add_argument("source_path", type=str, help="source directory in fs")
+    parser.add_argument("mount_point", type=str, help="local directory")
+    parser.add_argument(
+        "-o",
+        "--option",
+        action="append",
+        help="Any options of protocol included in the chained URL",
+    )
+    parser.add_argument(
+        "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
+    )
+    parser.add_argument(
+        "-f",
+        "--foreground",
+        action="store_false",
+        help="Running in foreground or not (Default: False)",
+    )
+    parser.add_argument(
+        "-t",
+        "--threads",
+        action="store_false",
+        help="Running with threads support (Default: False)",
+    )
+    parser.add_argument(
+        "-r",
+        "--ready-file",
+        action="store_false",
+        help="The `.fuse_ready` file will exist after FUSE is ready. "
+        "(Debugging purpose, Default: False)",
+    )
+    args = parser.parse_args(args)
+
+    kwargs = {}
+    for item in args.option or []:
+        key, sep, value = item.partition("=")
+        if not sep:
+            parser.error(message=f"Wrong option: {item!r}")
+        val = value.lower()
+        if val.endswith("[int]"):
+            value = int(value[: -len("[int]")])
+        elif val.endswith("[bool]"):
+            value = val[: -len("[bool]")] in ["1", "yes", "true"]
+
+        if "-" in key:
+            fs_name, setting_name = key.split("-", 1)
+            if fs_name in kwargs:
+                kwargs[fs_name][setting_name] = value
+            else:
+                kwargs[fs_name] = {setting_name: value}
+        else:
+            kwargs[key] = value
+
+    if args.log_file:
+        logging.basicConfig(
+            level=logging.DEBUG,
+            filename=args.log_file,
+            format="%(asctime)s %(message)s",
+        )
+
+        class LoggingFUSEr(FUSEr, LoggingMixIn):
+            pass
+
+        fuser = LoggingFUSEr
+    else:
+        fuser = FUSEr
+
+    fs, url_path = url_to_fs(args.url, **kwargs)
+    logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
+    run(
+        fs,
+        args.source_path,
+        args.mount_point,
+        foreground=args.foreground,
+        threads=args.threads,
+        ready_file=args.ready_file,
+        ops_class=fuser,
+    )
+
+
+if __name__ == "__main__":
+    import sys
+
+    main(sys.argv[1:])
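
A sketch of mounting programmatically with ``run`` above; the mount point is illustrative, and this assumes fusepy and a working FUSE install:

    import fsspec
    from fsspec.fuse import run

    fs = fsspec.filesystem("memory")
    fs.pipe("/root/hello.txt", b"hi")

    # foreground=False returns the daemon thread driving the FUSE loop
    th = run(fs, "/root/", "/mnt/point", foreground=False)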
llmeval-env/lib/python3.10/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,414 @@
1
+ import ast
2
+ import contextlib
3
+ import logging
4
+ import os
5
+ import re
6
+ from typing import ClassVar, Sequence
7
+
8
+ import panel as pn
9
+
10
+ from .core import OpenFile, get_filesystem_class, split_protocol
11
+ from .registry import known_implementations
12
+
13
+ pn.extension()
14
+ logger = logging.getLogger("fsspec.gui")
15
+
16
+
17
+ class SigSlot:
18
+ """Signal-slot mixin, for Panel event passing
19
+
20
+ Include this class in a widget manager's superclasses to be able to
21
+ register events and callbacks on Panel widgets managed by that class.
22
+
23
+ The method ``_register`` should be called as widgets are added, and external
24
+ code should call ``connect`` to associate callbacks.
25
+
26
+ By default, all signals emit a DEBUG logging statement.
27
+ """
28
+
29
+ # names of signals that this class may emit each of which must be
30
+ # set by _register for any new instance
31
+ signals: ClassVar[Sequence[str]] = []
32
+ # names of actions that this class may respond to
33
+ slots: ClassVar[Sequence[str]] = []
34
+
35
+ # each of which must be a method name
36
+
37
+ def __init__(self):
38
+ self._ignoring_events = False
39
+ self._sigs = {}
40
+ self._map = {}
41
+ self._setup()
42
+
43
+ def _setup(self):
44
+ """Create GUI elements and register signals"""
45
+ self.panel = pn.pane.PaneBase()
46
+ # no signals to set up in the base class
47
+
48
+ def _register(
49
+ self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
50
+ ):
51
+ """Watch the given attribute of a widget and assign it a named event
52
+
53
+ This is normally called at the time a widget is instantiated, in the
54
+ class which owns it.
55
+
56
+ Parameters
57
+ ----------
58
+ widget : pn.layout.Panel or None
59
+ Widget to watch. If None, an anonymous signal not associated with
60
+ any widget.
61
+ name : str
62
+ Name of this event
63
+ thing : str
64
+ Attribute of the given widget to watch
65
+ log_level : int
66
+ When the signal is triggered, a logging event of the given level
67
+ will be fired in the dfviz logger.
68
+ auto : bool
69
+ If True, automatically connects with a method in this class of the
70
+ same name.
71
+ """
72
+ if name not in self.signals:
73
+ raise ValueError(f"Attempt to assign an undeclared signal: {name}")
74
+ self._sigs[name] = {
75
+ "widget": widget,
76
+ "callbacks": [],
77
+ "thing": thing,
78
+ "log": log_level,
79
+ }
80
+ wn = "-".join(
81
+ [
82
+ getattr(widget, "name", str(widget)) if widget is not None else "none",
83
+ thing,
84
+ ]
85
+ )
86
+ self._map[wn] = name
87
+ if widget is not None:
88
+ widget.param.watch(self._signal, thing, onlychanged=True)
89
+ if auto and hasattr(self, name):
90
+ self.connect(name, getattr(self, name))
91
+
92
+ def _repr_mimebundle_(self, *args, **kwargs):
93
+ """Display in a notebook or a server"""
94
+ try:
95
+ return self.panel._repr_mimebundle_(*args, **kwargs)
96
+ except (ValueError, AttributeError):
97
+ raise NotImplementedError("Panel does not seem to be set " "up properly")
98
+
99
+ def connect(self, signal, slot):
100
+ """Associate call back with given event
101
+
102
+ The callback must be a function which takes the "new" value of the
103
+ watched attribute as the only parameter. If the callback return False,
104
+ this cancels any further processing of the given event.
105
+
106
+ Alternatively, the callback can be a string, in which case it means
107
+ emitting the correspondingly-named event (i.e., connect to self)
108
+ """
109
+ self._sigs[signal]["callbacks"].append(slot)
110
+
111
+ def _signal(self, event):
112
+ """This is called by a an action on a widget
113
+
114
+ Within an self.ignore_events context, nothing happens.
115
+
116
+ Tests can execute this method by directly changing the values of
117
+ widget components.
118
+ """
119
+ if not self._ignoring_events:
120
+ wn = "-".join([event.obj.name, event.name])
121
+ if wn in self._map and self._map[wn] in self._sigs:
122
+ self._emit(self._map[wn], event.new)
123
+
124
+ @contextlib.contextmanager
125
+ def ignore_events(self):
126
+ """Temporarily turn off events processing in this instance
127
+
128
+ (does not propagate to children)
129
+ """
130
+ self._ignoring_events = True
131
+ try:
132
+ yield
133
+ finally:
134
+ self._ignoring_events = False
135
+
136
+ def _emit(self, sig, value=None):
137
+ """An event happened, call its callbacks
138
+
139
+ This method can be used in tests to simulate message passing without
140
+ directly changing visual elements.
141
+
142
+ Calling of callbacks will halt whenever one returns False.
143
+ """
144
+ logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
145
+ for callback in self._sigs[sig]["callbacks"]:
146
+ if isinstance(callback, str):
147
+ self._emit(callback)
148
+ else:
149
+ try:
150
+ # running callbacks should not break the interface
151
+ ret = callback(value)
152
+ if ret is False:
153
+ break
154
+ except Exception as e:
155
+ logger.exception(
156
+ "Exception (%s) while executing callback for signal: %s",
157
+ e,
158
+ sig,
159
+ )
160
+
161
+ def show(self, threads=False):
162
+ """Open a new browser tab and display this instance's interface"""
163
+ self.panel.show(threads=threads, verbose=False)
164
+ return self
165
+
166
+
167
+ class SingleSelect(SigSlot):
168
+ """A multiselect which only allows you to select one item for an event"""
169
+
170
+ signals = ["_selected", "selected"] # the first is internal
171
+ slots = ["set_options", "set_selection", "add", "clear", "select"]
172
+
173
+ def __init__(self, **kwargs):
174
+ self.kwargs = kwargs
175
+ super().__init__()
176
+
177
+ def _setup(self):
178
+ self.panel = pn.widgets.MultiSelect(**self.kwargs)
179
+ self._register(self.panel, "_selected", "value")
180
+ self._register(None, "selected")
181
+ self.connect("_selected", self.select_one)
182
+
183
+ def _signal(self, *args, **kwargs):
184
+ super()._signal(*args, **kwargs)
185
+
186
+ def select_one(self, *_):
187
+ with self.ignore_events():
188
+ val = [self.panel.value[-1]] if self.panel.value else []
189
+ self.panel.value = val
190
+ self._emit("selected", self.panel.value)
191
+
192
+ def set_options(self, options):
193
+ self.panel.options = options
194
+
195
+ def clear(self):
196
+ self.panel.options = []
197
+
198
+ @property
199
+ def value(self):
200
+ return self.panel.value
201
+
202
+ def set_selection(self, selection):
203
+ self.panel.value = [selection]
204
+
205
+
206
+ class FileSelector(SigSlot):
+     """Panel-based graphical file selector widget
+
+     Instances of this widget are interactive and can be displayed in jupyter by having
+     them as the output of a cell, or in a separate browser tab using ``.show()``.
+     """
+
+     signals = [
+         "protocol_changed",
+         "selection_changed",
+         "directory_entered",
+         "home_clicked",
+         "up_clicked",
+         "go_clicked",
+         "filters_changed",
+     ]
+     slots = ["set_filters", "go_home"]
+
+     def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
+         """
+
+         Parameters
+         ----------
+         url : str (optional)
+             Initial value of the URL to populate the dialog; should include protocol
+         filters : list(str) (optional)
+             File endings to include in the listings. If not included, all files are
+             allowed. Does not affect directories.
+             If given, the endings will appear as checkboxes in the interface
+         ignore : list(str) (optional)
+             Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
+             hidden files on posix
+         kwargs : dict (optional)
+             To pass to file system instance
+         """
+         if url:
+             self.init_protocol, url = split_protocol(url)
+         else:
+             self.init_protocol, url = "file", os.getcwd()
+         self.init_url = url
+         self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
+         self.filters = filters
+         self.ignore = [re.compile(i) for i in ignore or []]
+         self._fs = None
+         super().__init__()
+
+     def _setup(self):
+         self.url = pn.widgets.TextInput(
+             name="url",
+             value=self.init_url,
+             align="end",
+             sizing_mode="stretch_width",
+             width_policy="max",
+         )
+         self.protocol = pn.widgets.Select(
+             options=sorted(known_implementations),
+             value=self.init_protocol,
+             name="protocol",
+             align="center",
+         )
+         self.kwargs = pn.widgets.TextInput(
+             name="kwargs", value=self.init_kwargs, align="center"
+         )
+         self.go = pn.widgets.Button(name="⇨", align="end", width=45)
+         self.main = SingleSelect(size=10)
+         self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
+         self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
+
+         self._register(self.protocol, "protocol_changed", auto=True)
+         self._register(self.go, "go_clicked", "clicks", auto=True)
+         self._register(self.up, "up_clicked", "clicks", auto=True)
+         self._register(self.home, "home_clicked", "clicks", auto=True)
+         self._register(None, "selection_changed")
+         self.main.connect("selected", self.selection_changed)
+         self._register(None, "directory_entered")
+         self.prev_protocol = self.protocol.value
+         self.prev_kwargs = self.storage_options
+
+         self.filter_sel = pn.widgets.CheckBoxGroup(
+             value=[], options=[], inline=False, align="end", width_policy="min"
+         )
+         self._register(self.filter_sel, "filters_changed", auto=True)
+
+         self.panel = pn.Column(
+             pn.Row(self.protocol, self.kwargs),
+             pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
+             self.main.panel,
+         )
+         self.set_filters(self.filters)
+         self.go_clicked()
+
+     def set_filters(self, filters=None):
+         self.filters = filters
+         if filters:
+             self.filter_sel.options = filters
+             self.filter_sel.value = filters
+         else:
+             self.filter_sel.options = []
+             self.filter_sel.value = []
+
+     @property
+     def storage_options(self):
+         """Value of the kwargs box as a dictionary"""
+         return ast.literal_eval(self.kwargs.value) or {}
+
+     @property
+     def fs(self):
+         """Current filesystem instance"""
+         if self._fs is None:
+             cls = get_filesystem_class(self.protocol.value)
+             self._fs = cls(**self.storage_options)
+         return self._fs
+
+     @property
+     def urlpath(self):
+         """URL of currently selected item"""
+         return (
+             (f"{self.protocol.value}://{self.main.value[0]}")
+             if self.main.value
+             else None
+         )
+
+     def open_file(self, mode="rb", compression=None, encoding=None):
+         """Create OpenFile instance for the currently selected item
+
+         For example, in a notebook you might do something like
+
+         .. code-block::
+
+             [ ]: sel = FileSelector(); sel
+
+             # user selects their file
+
+             [ ]: with sel.open_file('rb') as f:
+             ...      out = f.read()
+
+         Parameters
+         ----------
+         mode: str (optional)
+             Open mode for the file.
+         compression: str (optional)
+             Interact with the file as if it were compressed with this codec.
+             Set to 'infer' to guess compression from the file ending.
+         encoding: str (optional)
+             If using text mode, use this encoding; defaults to UTF-8.
+         """
+         if self.urlpath is None:
+             raise ValueError("No file selected")
+         return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
+
+     def filters_changed(self, values):
+         self.filters = values
+         self.go_clicked()
+
+     def selection_changed(self, *_):
+         if self.urlpath is None:
+             return
+         if self.fs.isdir(self.urlpath):
+             self.url.value = self.fs._strip_protocol(self.urlpath)
+             self.go_clicked()
+
+     def go_clicked(self, *_):
+         if (
+             self.prev_protocol != self.protocol.value
+             or self.prev_kwargs != self.storage_options
+         ):
+             self._fs = None  # causes fs to be recreated
+             self.prev_protocol = self.protocol.value
+             self.prev_kwargs = self.storage_options
+         listing = sorted(
+             self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
+         )
+         listing = [
+             l
+             for l in listing
+             if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
+         ]
+         folders = {
+             "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
+             for o in listing
+             if o["type"] == "directory"
+         }
+         files = {
+             "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
+             for o in listing
+             if o["type"] == "file"
+         }
+         if self.filters:
+             files = {
+                 k: v
+                 for k, v in files.items()
+                 if any(v.endswith(ext) for ext in self.filters)
+             }
+         self.main.set_options(dict(**folders, **files))
+
+     def protocol_changed(self, *_):
+         self._fs = None
+         self.main.options = []
+         self.url.value = ""
+
+     def home_clicked(self, *_):
+         self.protocol.value = self.init_protocol
+         self.kwargs.value = self.init_kwargs
+         self.url.value = self.init_url
+         self.go_clicked()
+
+     def up_clicked(self, *_):
+         self.url.value = self.fs._parent(self.url.value)
+         self.go_clicked()
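End to end, a session with the selector might look like the sketch below. The bucket name and the ``anon`` storage option are illustrative; the kwargs string is parsed with ``ast.literal_eval`` by ``storage_options`` as shown above:

    from fsspec.gui import FileSelector

    sel = FileSelector("s3://example-bucket/data", filters=[".csv"],
                       kwargs="{'anon': True}")
    sel.show()  # or display `sel` as the output of a notebook cell

    # ...after a file has been picked in the interface...
    print(sel.urlpath)            # e.g. "s3://example-bucket/data/x.csv"
    with sel.open_file("rb") as f:
        head = f.read(64)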
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/arrow.cpython-310.pyc ADDED
Binary file (9.09 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc ADDED
Binary file (3.28 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc ADDED
Binary file (7.56 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc ADDED
Binary file (4.76 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc ADDED
Binary file (2.3 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc ADDED
Binary file (13.6 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc ADDED
Binary file (11.1 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc ADDED
Binary file (3.86 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/github.cpython-310.pyc ADDED
Binary file (7.48 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc ADDED
Binary file (24.3 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc ADDED
Binary file (4.21 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc ADDED
Binary file (14.4 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc ADDED
Binary file (37.1 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc ADDED
Binary file (6.34 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc ADDED
Binary file (16 kB)
llmeval-env/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc ADDED
Binary file (4.23 kB)