applied-ai-018 committed on
Commit c4396cd · verified
1 Parent(s): 775e6d7

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/aiosignal/__init__.py +36 -0
  2. env-llmeval/lib/python3.10/site-packages/aiosignal/__init__.pyi +12 -0
  3. env-llmeval/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/aiosignal/py.typed +0 -0
  5. env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz +3 -0
  6. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__init__.py +0 -0
  7. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/arrow.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/github.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/smb.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/tar.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/arrow.py +306 -0
  31. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py +76 -0
  32. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py +232 -0
  33. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cached.py +882 -0
  34. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dask.py +152 -0
  35. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dbfs.py +467 -0
  36. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/git.py +127 -0
  37. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/github.py +227 -0
  38. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/libarchive.py +213 -0
  39. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/local.py +418 -0
  40. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/reference.py +1160 -0
  41. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/smb.py +324 -0
  42. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/tar.py +124 -0
  43. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/webhdfs.py +486 -0
  44. env-llmeval/lib/python3.10/site-packages/fsspec/implementations/zip.py +133 -0
  45. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py +287 -0
  46. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/aiosignal/__init__.py ADDED
@@ -0,0 +1,36 @@
+ from frozenlist import FrozenList
+
+ __version__ = "1.3.1"
+
+ __all__ = ("Signal",)
+
+
+ class Signal(FrozenList):
+     """Coroutine-based signal implementation.
+
+     To connect a callback to a signal, use any list method.
+
+     Signals are fired using the send() coroutine, which takes named
+     arguments.
+     """
+
+     __slots__ = ("_owner",)
+
+     def __init__(self, owner):
+         super().__init__()
+         self._owner = owner
+
+     def __repr__(self):
+         return "<Signal owner={}, frozen={}, {!r}>".format(
+             self._owner, self.frozen, list(self)
+         )
+
+     async def send(self, *args, **kwargs):
+         """
+         Sends data to all registered receivers.
+         """
+         if not self.frozen:
+             raise RuntimeError("Cannot send non-frozen signal.")
+
+         for receiver in self:
+             await receiver(*args, **kwargs)  # type: ignore
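
A minimal usage sketch of the Signal class added above (the owner string and receiver coroutine are illustrative, not part of the packaged file):

import asyncio

from aiosignal import Signal


async def on_event(**kwargs):
    # Receivers are plain coroutines; connect them with any list method.
    print("received:", kwargs)


async def main():
    sig = Signal(owner="demo")  # owner is only used in the repr shown above
    sig.append(on_event)
    sig.freeze()                # send() raises RuntimeError on a non-frozen signal
    await sig.send(value=42)    # awaits each registered receiver in order


asyncio.run(main())
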
env-llmeval/lib/python3.10/site-packages/aiosignal/__init__.pyi ADDED
@@ -0,0 +1,12 @@
+ from typing import Any, Generic, TypeVar
+
+ from frozenlist import FrozenList
+
+ __all__ = ("Signal",)
+
+ _T = TypeVar("_T")
+
+ class Signal(FrozenList[_T], Generic[_T]):
+     def __init__(self, owner: Any) -> None: ...
+     def __repr__(self) -> str: ...
+     async def send(self, *args: Any, **kwargs: Any) -> None: ...
env-llmeval/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.34 kB).
 
env-llmeval/lib/python3.10/site-packages/aiosignal/py.typed ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3ea52e7b6e968de0d884df1288193596fa95b803db4f92a18279a7398004475
+ size 156400
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/arrow.cpython-310.pyc ADDED
Binary file (9.08 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc ADDED
Binary file (3.27 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc ADDED
Binary file (7.55 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc ADDED
Binary file (27.1 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc ADDED
Binary file (4.76 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc ADDED
Binary file (1.78 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-310.pyc ADDED
Binary file (13.7 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc ADDED
Binary file (11.1 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc ADDED
Binary file (3.85 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/github.cpython-310.pyc ADDED
Binary file (7.47 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc ADDED
Binary file (24.3 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc ADDED
Binary file (4.2 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-310.pyc ADDED
Binary file (6.08 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc ADDED
Binary file (13.8 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc ADDED
Binary file (7.88 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc ADDED
Binary file (37.1 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc ADDED
Binary file (6.34 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/smb.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/tar.cpython-310.pyc ADDED
Binary file (3.04 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc ADDED
Binary file (16 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc ADDED
Binary file (4.18 kB).
 
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/arrow.py ADDED
@@ -0,0 +1,306 @@
+ import errno
+ import io
+ import os
+ import secrets
+ import shutil
+ from contextlib import suppress
+ from functools import cached_property, wraps
+ from urllib.parse import parse_qs
+
+ from fsspec.spec import AbstractFileSystem
+ from fsspec.utils import (
+     get_package_version_without_import,
+     infer_storage_options,
+     mirror_from,
+     tokenize,
+ )
+
+
+ def wrap_exceptions(func):
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except OSError as exception:
+             if not exception.args:
+                 raise
+
+             message, *args = exception.args
+             if isinstance(message, str) and "does not exist" in message:
+                 raise FileNotFoundError(errno.ENOENT, message) from exception
+             else:
+                 raise
+
+     return wrapper
+
+
+ PYARROW_VERSION = None
+
+
+ class ArrowFSWrapper(AbstractFileSystem):
+     """FSSpec-compatible wrapper of pyarrow.fs.FileSystem.
+
+     Parameters
+     ----------
+     fs : pyarrow.fs.FileSystem
+
+     """
+
+     root_marker = "/"
+
+     def __init__(self, fs, **kwargs):
+         global PYARROW_VERSION
+         PYARROW_VERSION = get_package_version_without_import("pyarrow")
+         self.fs = fs
+         super().__init__(**kwargs)
+
+     @property
+     def protocol(self):
+         return self.fs.type_name
+
+     @cached_property
+     def fsid(self):
+         return "hdfs_" + tokenize(self.fs.host, self.fs.port)
+
+     @classmethod
+     def _strip_protocol(cls, path):
+         ops = infer_storage_options(path)
+         path = ops["path"]
+         if path.startswith("//"):
+             # special case for "hdfs://path" (without the triple slash)
+             path = path[1:]
+         return path
+
+     def ls(self, path, detail=False, **kwargs):
+         path = self._strip_protocol(path)
+         from pyarrow.fs import FileSelector
+
+         entries = [
+             self._make_entry(entry)
+             for entry in self.fs.get_file_info(FileSelector(path))
+         ]
+         if detail:
+             return entries
+         else:
+             return [entry["name"] for entry in entries]
+
+     def info(self, path, **kwargs):
+         path = self._strip_protocol(path)
+         [info] = self.fs.get_file_info([path])
+         return self._make_entry(info)
+
+     def exists(self, path):
+         path = self._strip_protocol(path)
+         try:
+             self.info(path)
+         except FileNotFoundError:
+             return False
+         else:
+             return True
+
+     def _make_entry(self, info):
+         from pyarrow.fs import FileType
+
+         if info.type is FileType.Directory:
+             kind = "directory"
+         elif info.type is FileType.File:
+             kind = "file"
+         elif info.type is FileType.NotFound:
+             raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
+         else:
+             kind = "other"
+
+         return {
+             "name": info.path,
+             "size": info.size,
+             "type": kind,
+             "mtime": info.mtime,
+         }
+
+     @wrap_exceptions
+     def cp_file(self, path1, path2, **kwargs):
+         path1 = self._strip_protocol(path1).rstrip("/")
+         path2 = self._strip_protocol(path2).rstrip("/")
+
+         with self._open(path1, "rb") as lstream:
+             tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
+             try:
+                 with self.open(tmp_fname, "wb") as rstream:
+                     shutil.copyfileobj(lstream, rstream)
+                 self.fs.move(tmp_fname, path2)
+             except BaseException:  # noqa
+                 with suppress(FileNotFoundError):
+                     self.fs.delete_file(tmp_fname)
+                 raise
+
+     @wrap_exceptions
+     def mv(self, path1, path2, **kwargs):
+         path1 = self._strip_protocol(path1).rstrip("/")
+         path2 = self._strip_protocol(path2).rstrip("/")
+         self.fs.move(path1, path2)
+
+     mv_file = mv
+
+     @wrap_exceptions
+     def rm_file(self, path):
+         path = self._strip_protocol(path)
+         self.fs.delete_file(path)
+
+     @wrap_exceptions
+     def rm(self, path, recursive=False, maxdepth=None):
+         path = self._strip_protocol(path).rstrip("/")
+         if self.isdir(path):
+             if recursive:
+                 self.fs.delete_dir(path)
+             else:
+                 raise ValueError("Can't delete directories without recursive=False")
+         else:
+             self.fs.delete_file(path)
+
+     @wrap_exceptions
+     def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
+         if mode == "rb":
+             if seekable:
+                 method = self.fs.open_input_file
+             else:
+                 method = self.fs.open_input_stream
+         elif mode == "wb":
+             method = self.fs.open_output_stream
+         elif mode == "ab":
+             method = self.fs.open_append_stream
+         else:
+             raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")
+
+         _kwargs = {}
+         if mode != "rb" or not seekable:
+             if int(PYARROW_VERSION.split(".")[0]) >= 4:
+                 # disable compression auto-detection
+                 _kwargs["compression"] = None
+         stream = method(path, **_kwargs)
+
+         return ArrowFile(self, stream, path, mode, block_size, **kwargs)
+
+     @wrap_exceptions
+     def mkdir(self, path, create_parents=True, **kwargs):
+         path = self._strip_protocol(path)
+         if create_parents:
+             self.makedirs(path, exist_ok=True)
+         else:
+             self.fs.create_dir(path, recursive=False)
+
+     @wrap_exceptions
+     def makedirs(self, path, exist_ok=False):
+         path = self._strip_protocol(path)
+         self.fs.create_dir(path, recursive=True)
+
+     @wrap_exceptions
+     def rmdir(self, path):
+         path = self._strip_protocol(path)
+         self.fs.delete_dir(path)
+
+     @wrap_exceptions
+     def modified(self, path):
+         path = self._strip_protocol(path)
+         return self.fs.get_file_info(path).mtime
+
+     def cat_file(self, path, start=None, end=None, **kwargs):
+         kwargs["seekable"] = start not in [None, 0]
+         return super().cat_file(path, start=None, end=None, **kwargs)
+
+     def get_file(self, rpath, lpath, **kwargs):
+         kwargs["seekable"] = False
+         super().get_file(rpath, lpath, **kwargs)
+
+
+ @mirror_from(
+     "stream",
+     [
+         "read",
+         "seek",
+         "tell",
+         "write",
+         "readable",
+         "writable",
+         "close",
+         "size",
+         "seekable",
+     ],
+ )
+ class ArrowFile(io.IOBase):
+     def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
+         self.path = path
+         self.mode = mode
+
+         self.fs = fs
+         self.stream = stream
+
+         self.blocksize = self.block_size = block_size
+         self.kwargs = kwargs
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, *args):
+         return self.close()
+
+
+ class HadoopFileSystem(ArrowFSWrapper):
+     """A wrapper on top of the pyarrow.fs.HadoopFileSystem
+     to connect its interface with fsspec"""
+
+     protocol = "hdfs"
+
+     def __init__(
+         self,
+         host="default",
+         port=0,
+         user=None,
+         kerb_ticket=None,
+         replication=3,
+         extra_conf=None,
+         **kwargs,
+     ):
+         """
+
+         Parameters
+         ----------
+         host: str
+             Hostname, IP or "default" to try to read from Hadoop config
+         port: int
+             Port to connect on, or default from Hadoop config if 0
+         user: str or None
+             If given, connect as this username
+         kerb_ticket: str or None
+             If given, use this ticket for authentication
+         replication: int
+             set replication factor of file for write operations. default value is 3.
+         extra_conf: None or dict
+             Passed on to HadoopFileSystem
+         """
+         from pyarrow.fs import HadoopFileSystem
+
+         fs = HadoopFileSystem(
+             host=host,
+             port=port,
+             user=user,
+             kerb_ticket=kerb_ticket,
+             replication=replication,
+             extra_conf=extra_conf,
+         )
+         super().__init__(fs=fs, **kwargs)
+
+     @staticmethod
+     def _get_kwargs_from_urls(path):
+         ops = infer_storage_options(path)
+         out = {}
+         if ops.get("host", None):
+             out["host"] = ops["host"]
+         if ops.get("username", None):
+             out["user"] = ops["username"]
+         if ops.get("port", None):
+             out["port"] = ops["port"]
+         if ops.get("url_query", None):
+             queries = parse_qs(ops["url_query"])
+             if queries.get("replication", None):
+                 out["replication"] = int(queries["replication"][0])
+         return out
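
A rough sketch of exercising ArrowFSWrapper against a local pyarrow filesystem (the directory path is illustrative, not part of the commit):

from pyarrow.fs import LocalFileSystem

from fsspec.implementations.arrow import ArrowFSWrapper

# Wrapping any pyarrow.fs.FileSystem gives it the fsspec interface defined above.
fs = ArrowFSWrapper(LocalFileSystem())
fs.mkdir("/tmp/arrow-demo", create_parents=True)
with fs.open("/tmp/arrow-demo/hello.txt", "wb") as f:  # dispatched to open_output_stream
    f.write(b"hello")
print(fs.ls("/tmp/arrow-demo"))  # one entry for the file just written
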
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py ADDED
@@ -0,0 +1,76 @@
+ from __future__ import annotations
+
+ import abc
+ import hashlib
+
+ from fsspec.implementations.local import make_path_posix
+
+
+ class AbstractCacheMapper(abc.ABC):
+     """Abstract super-class for mappers from remote URLs to local cached
+     basenames.
+     """
+
+     @abc.abstractmethod
+     def __call__(self, path: str) -> str:
+         ...
+
+     def __eq__(self, other: object) -> bool:
+         # Identity only depends on class. When derived classes have attributes
+         # they will need to be included.
+         return isinstance(other, type(self))
+
+     def __hash__(self) -> int:
+         # Identity only depends on class. When derived classes have attributes
+         # they will need to be included.
+         return hash(type(self))
+
+
+ class BasenameCacheMapper(AbstractCacheMapper):
+     """Cache mapper that uses the basename of the remote URL and a fixed number
+     of directory levels above this.
+
+     The default is zero directory levels, meaning different paths with the same
+     basename will have the same cached basename.
+     """
+
+     def __init__(self, directory_levels: int = 0):
+         if directory_levels < 0:
+             raise ValueError(
+                 "BasenameCacheMapper requires zero or positive directory_levels"
+             )
+         self.directory_levels = directory_levels
+
+         # Separator for directories when encoded as strings.
+         self._separator = "_@_"
+
+     def __call__(self, path: str) -> str:
+         path = make_path_posix(path)
+         prefix, *bits = path.rsplit("/", self.directory_levels + 1)
+         if bits:
+             return self._separator.join(bits)
+         else:
+             return prefix  # No separator found, simple filename
+
+     def __eq__(self, other: object) -> bool:
+         return super().__eq__(other) and self.directory_levels == other.directory_levels
+
+     def __hash__(self) -> int:
+         return super().__hash__() ^ hash(self.directory_levels)
+
+
+ class HashCacheMapper(AbstractCacheMapper):
+     """Cache mapper that uses a hash of the remote URL."""
+
+     def __call__(self, path: str) -> str:
+         return hashlib.sha256(path.encode()).hexdigest()
+
+
+ def create_cache_mapper(same_names: bool) -> AbstractCacheMapper:
+     """Factory method to create cache mapper for backward compatibility with
+     ``CachingFileSystem`` constructor using ``same_names`` kwarg.
+     """
+     if same_names:
+         return BasenameCacheMapper()
+     else:
+         return HashCacheMapper()
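
A small sketch contrasting the two mappers above (the URL is illustrative, not part of the commit):

from fsspec.implementations.cache_mapper import (
    BasenameCacheMapper,
    HashCacheMapper,
    create_cache_mapper,
)

url = "https://example.org/data/v1/file.csv"

# Default behaviour (same_names=False) hashes the whole URL.
assert isinstance(create_cache_mapper(same_names=False), HashCacheMapper)
print(HashCacheMapper()(url))                        # sha256 hex digest of the URL

# Basename mapping keeps the filename; extra levels are joined with "_@_".
print(BasenameCacheMapper()(url))                    # file.csv
print(BasenameCacheMapper(directory_levels=1)(url))  # v1_@_file.csv
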
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,232 @@
+ from __future__ import annotations
+
+ import os
+ import pickle
+ import time
+ from typing import TYPE_CHECKING
+
+ from fsspec.utils import atomic_write
+
+ try:
+     import ujson as json
+ except ImportError:
+     if not TYPE_CHECKING:
+         import json
+
+ if TYPE_CHECKING:
+     from typing import Any, Dict, Iterator, Literal
+
+     from typing_extensions import TypeAlias
+
+     from .cached import CachingFileSystem
+
+     Detail: TypeAlias = Dict[str, Any]
+
+
+ class CacheMetadata:
+     """Cache metadata.
+
+     All reading and writing of cache metadata is performed by this class,
+     accessing the cached files and blocks is not.
+
+     Metadata is stored in a single file per storage directory in JSON format.
+     For backward compatibility, also reads metadata stored in pickle format
+     which is converted to JSON when next saved.
+     """
+
+     def __init__(self, storage: list[str]):
+         """
+
+         Parameters
+         ----------
+         storage: list[str]
+             Directories containing cached files, must be at least one. Metadata
+             is stored in the last of these directories by convention.
+         """
+         if not storage:
+             raise ValueError("CacheMetadata expects at least one storage location")
+
+         self._storage = storage
+         self.cached_files: list[Detail] = [{}]
+
+         # Private attribute to force saving of metadata in pickle format rather than
+         # JSON for use in tests to confirm can read both pickle and JSON formats.
+         self._force_save_pickle = False
+
+     def _load(self, fn: str) -> Detail:
+         """Low-level function to load metadata from specific file"""
+         try:
+             with open(fn, "r") as f:
+                 return json.load(f)
+         except ValueError:
+             with open(fn, "rb") as f:
+                 return pickle.load(f)
+
+     def _save(self, metadata_to_save: Detail, fn: str) -> None:
+         """Low-level function to save metadata to specific file"""
+         if self._force_save_pickle:
+             with atomic_write(fn) as f:
+                 pickle.dump(metadata_to_save, f)
+         else:
+             with atomic_write(fn, mode="w") as f:
+                 json.dump(metadata_to_save, f)
+
+     def _scan_locations(
+         self, writable_only: bool = False
+     ) -> Iterator[tuple[str, str, bool]]:
+         """Yield locations (filenames) where metadata is stored, and whether
+         writable or not.
+
+         Parameters
+         ----------
+         writable: bool
+             Set to True to only yield writable locations.
+
+         Returns
+         -------
+         Yields (str, str, bool)
+         """
+         n = len(self._storage)
+         for i, storage in enumerate(self._storage):
+             writable = i == n - 1
+             if writable_only and not writable:
+                 continue
+             yield os.path.join(storage, "cache"), storage, writable
+
+     def check_file(
+         self, path: str, cfs: CachingFileSystem | None
+     ) -> Literal[False] | tuple[Detail, str]:
+         """If path is in cache return its details, otherwise return ``False``.
+
+         If the optional CachingFileSystem is specified then it is used to
+         perform extra checks to reject possible matches, such as if they are
+         too old.
+         """
+         for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
+             if path not in cache:
+                 continue
+             detail = cache[path].copy()
+
+             if cfs is not None:
+                 if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
+                     # Wrong file as determined by hash of file properties
+                     continue
+                 if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
+                     # Cached file has expired
+                     continue
+
+             fn = os.path.join(base, detail["fn"])
+             if os.path.exists(fn):
+                 return detail, fn
+         return False
+
+     def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
+         """Remove expired metadata from the cache.
+
+         Returns names of files corresponding to expired metadata and a boolean
+         flag indicating whether the writable cache is empty. Caller is
+         responsible for deleting the expired files.
+         """
+         expired_files = []
+         for path, detail in self.cached_files[-1].copy().items():
+             if time.time() - detail["time"] > expiry_time:
+                 fn = detail.get("fn", "")
+                 if not fn:
+                     raise RuntimeError(
+                         f"Cache metadata does not contain 'fn' for {path}"
+                     )
+                 fn = os.path.join(self._storage[-1], fn)
+                 expired_files.append(fn)
+                 self.cached_files[-1].pop(path)
+
+         if self.cached_files[-1]:
+             cache_path = os.path.join(self._storage[-1], "cache")
+             self._save(self.cached_files[-1], cache_path)
+
+         writable_cache_empty = not self.cached_files[-1]
+         return expired_files, writable_cache_empty
+
+     def load(self) -> None:
+         """Load all metadata from disk and store in ``self.cached_files``"""
+         cached_files = []
+         for fn, _, _ in self._scan_locations():
+             if os.path.exists(fn):
+                 # TODO: consolidate blocks here
+                 loaded_cached_files = self._load(fn)
+                 for c in loaded_cached_files.values():
+                     if isinstance(c["blocks"], list):
+                         c["blocks"] = set(c["blocks"])
+                 cached_files.append(loaded_cached_files)
+             else:
+                 cached_files.append({})
+         self.cached_files = cached_files or [{}]
+
+     def on_close_cached_file(self, f: Any, path: str) -> None:
+         """Perform side-effect actions on closing a cached file.
+
+         The actual closing of the file is the responsibility of the caller.
+         """
+         # File must be writable, so in self.cached_files[-1]
+         c = self.cached_files[-1][path]
+         if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
+             c["blocks"] = True
+
+     def pop_file(self, path: str) -> str | None:
+         """Remove metadata of cached file.
+
+         If path is in the cache, return the filename of the cached file,
+         otherwise return ``None``. Caller is responsible for deleting the
+         cached file.
+         """
+         details = self.check_file(path, None)
+         if not details:
+             return None
+         _, fn = details
+         if fn.startswith(self._storage[-1]):
+             self.cached_files[-1].pop(path)
+             self.save()
+         else:
+             raise PermissionError(
+                 "Can only delete cached file in last, writable cache location"
+             )
+         return fn
+
+     def save(self) -> None:
+         """Save metadata to disk"""
+         for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
+             if not writable:
+                 continue
+
+             if os.path.exists(fn):
+                 cached_files = self._load(fn)
+                 for k, c in cached_files.items():
+                     if k in cache:
+                         if c["blocks"] is True or cache[k]["blocks"] is True:
+                             c["blocks"] = True
+                         else:
+                             # self.cached_files[*][*]["blocks"] must continue to
+                             # point to the same set object so that updates
+                             # performed by MMapCache are propagated back to
+                             # self.cached_files.
+                             blocks = cache[k]["blocks"]
+                             blocks.update(c["blocks"])
+                             c["blocks"] = blocks
+                         c["time"] = max(c["time"], cache[k]["time"])
+                         c["uid"] = cache[k]["uid"]
+
+                 # Files can be added to cache after it was written once
+                 for k, c in cache.items():
+                     if k not in cached_files:
+                         cached_files[k] = c
+             else:
+                 cached_files = cache
+             cache = {k: v.copy() for k, v in cached_files.items()}
+             for c in cache.values():
+                 if isinstance(c["blocks"], set):
+                     c["blocks"] = list(c["blocks"])
+             self._save(cache, fn)
+         self.cached_files[-1] = cached_files
+
+     def update_file(self, path: str, detail: Detail) -> None:
+         """Update metadata for specific file in memory, do not save"""
+         self.cached_files[-1][path] = detail
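
For orientation, a sketch of the per-path detail dict this class persists; the paths and values below are made up, and real entries are produced by the caching filesystems in cached.py:

import os

from fsspec.implementations.cache_metadata import CacheMetadata

storage = "/tmp/fsspec-cache-demo"  # illustrative cache directory
os.makedirs(storage, exist_ok=True)

md = CacheMetadata([storage])
md.load()  # reads <storage>/cache if it already exists
md.update_file(
    "https://example.org/data.bin",  # illustrative remote path
    {
        "original": "https://example.org/data.bin",
        "fn": "9f86d081884c",   # cached basename chosen by a cache mapper (made up)
        "blocks": True,         # True = fully cached; otherwise a set of block numbers
        "time": 1700000000.0,
        "uid": "example-ukey",  # compared with fs.ukey() when check_files=True
    },
)
md.save()  # written to <storage>/cache as JSON via atomic_write
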
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/cached.py ADDED
@@ -0,0 +1,882 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import tempfile
7
+ import time
8
+ import weakref
9
+ from shutil import rmtree
10
+ from typing import TYPE_CHECKING, Any, Callable, ClassVar
11
+
12
+ from fsspec import AbstractFileSystem, filesystem
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.compression import compr
15
+ from fsspec.core import BaseCache, MMapCache
16
+ from fsspec.exceptions import BlocksizeMismatchError
17
+ from fsspec.implementations.cache_mapper import create_cache_mapper
18
+ from fsspec.implementations.cache_metadata import CacheMetadata
19
+ from fsspec.spec import AbstractBufferedFile
20
+ from fsspec.transaction import Transaction
21
+ from fsspec.utils import infer_compression
22
+
23
+ if TYPE_CHECKING:
24
+ from fsspec.implementations.cache_mapper import AbstractCacheMapper
25
+
26
+ logger = logging.getLogger("fsspec.cached")
27
+
28
+
29
+ class WriteCachedTransaction(Transaction):
30
+ def complete(self, commit=True):
31
+ rpaths = [f.path for f in self.files]
32
+ lpaths = [f.fn for f in self.files]
33
+ if commit:
34
+ self.fs.put(lpaths, rpaths)
35
+ # else remove?
36
+ self.fs._intrans = False
37
+
38
+
39
+ class CachingFileSystem(AbstractFileSystem):
40
+ """Locally caching filesystem, layer over any other FS
41
+
42
+ This class implements chunk-wise local storage of remote files, for quick
43
+ access after the initial download. The files are stored in a given
44
+ directory with hashes of URLs for the filenames. If no directory is given,
45
+ a temporary one is used, which should be cleaned up by the OS after the
46
+ process ends. The files themselves are sparse (as implemented in
47
+ :class:`~fsspec.caching.MMapCache`), so only the data which is accessed
48
+ takes up space.
49
+
50
+ Restrictions:
51
+
52
+ - the block-size must be the same for each access of a given file, unless
53
+ all blocks of the file have already been read
54
+ - caching can only be applied to file-systems which produce files
55
+ derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also
56
+ allowed, for testing
57
+ """
58
+
59
+ protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached")
60
+
61
+ def __init__(
62
+ self,
63
+ target_protocol=None,
64
+ cache_storage="TMP",
65
+ cache_check=10,
66
+ check_files=False,
67
+ expiry_time=604800,
68
+ target_options=None,
69
+ fs=None,
70
+ same_names: bool | None = None,
71
+ compression=None,
72
+ cache_mapper: AbstractCacheMapper | None = None,
73
+ **kwargs,
74
+ ):
75
+ """
76
+
77
+ Parameters
78
+ ----------
79
+ target_protocol: str (optional)
80
+ Target filesystem protocol. Provide either this or ``fs``.
81
+ cache_storage: str or list(str)
82
+ Location to store files. If "TMP", this is a temporary directory,
83
+ and will be cleaned up by the OS when this process ends (or later).
84
+ If a list, each location will be tried in the order given, but
85
+ only the last will be considered writable.
86
+ cache_check: int
87
+ Number of seconds between reload of cache metadata
88
+ check_files: bool
89
+ Whether to explicitly see if the UID of the remote file matches
90
+ the stored one before using. Warning: some file systems such as
91
+ HTTP cannot reliably give a unique hash of the contents of some
92
+ path, so be sure to set this option to False.
93
+ expiry_time: int
94
+ The time in seconds after which a local copy is considered useless.
95
+ Set to falsy to prevent expiry. The default is equivalent to one
96
+ week.
97
+ target_options: dict or None
98
+ Passed to the instantiation of the FS, if fs is None.
99
+ fs: filesystem instance
100
+ The target filesystem to run against. Provide this or ``protocol``.
101
+ same_names: bool (optional)
102
+ By default, target URLs are hashed using a ``HashCacheMapper`` so
103
+ that files from different backends with the same basename do not
104
+ conflict. If this argument is ``true``, a ``BasenameCacheMapper``
105
+ is used instead. Other cache mapper options are available by using
106
+ the ``cache_mapper`` keyword argument. Only one of this and
107
+ ``cache_mapper`` should be specified.
108
+ compression: str (optional)
109
+ To decompress on download. Can be 'infer' (guess from the URL name),
110
+ one of the entries in ``fsspec.compression.compr``, or None for no
111
+ decompression.
112
+ cache_mapper: AbstractCacheMapper (optional)
113
+ The object use to map from original filenames to cached filenames.
114
+ Only one of this and ``same_names`` should be specified.
115
+ """
116
+ super().__init__(**kwargs)
117
+ if fs is None and target_protocol is None:
118
+ raise ValueError(
119
+ "Please provide filesystem instance(fs) or target_protocol"
120
+ )
121
+ if not (fs is None) ^ (target_protocol is None):
122
+ raise ValueError(
123
+ "Both filesystems (fs) and target_protocol may not be both given."
124
+ )
125
+ if cache_storage == "TMP":
126
+ tempdir = tempfile.mkdtemp()
127
+ storage = [tempdir]
128
+ weakref.finalize(self, self._remove_tempdir, tempdir)
129
+ else:
130
+ if isinstance(cache_storage, str):
131
+ storage = [cache_storage]
132
+ else:
133
+ storage = cache_storage
134
+ os.makedirs(storage[-1], exist_ok=True)
135
+ self.storage = storage
136
+ self.kwargs = target_options or {}
137
+ self.cache_check = cache_check
138
+ self.check_files = check_files
139
+ self.expiry = expiry_time
140
+ self.compression = compression
141
+
142
+ # Size of cache in bytes. If None then the size is unknown and will be
143
+ # recalculated the next time cache_size() is called. On writes to the
144
+ # cache this is reset to None.
145
+ self._cache_size = None
146
+
147
+ if same_names is not None and cache_mapper is not None:
148
+ raise ValueError(
149
+ "Cannot specify both same_names and cache_mapper in "
150
+ "CachingFileSystem.__init__"
151
+ )
152
+ if cache_mapper is not None:
153
+ self._mapper = cache_mapper
154
+ else:
155
+ self._mapper = create_cache_mapper(
156
+ same_names if same_names is not None else False
157
+ )
158
+
159
+ self.target_protocol = (
160
+ target_protocol
161
+ if isinstance(target_protocol, str)
162
+ else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0])
163
+ )
164
+ self._metadata = CacheMetadata(self.storage)
165
+ self.load_cache()
166
+ self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs)
167
+
168
+ def _strip_protocol(path):
169
+ # acts as a method, since each instance has a difference target
170
+ return self.fs._strip_protocol(type(self)._strip_protocol(path))
171
+
172
+ self._strip_protocol: Callable = _strip_protocol
173
+
174
+ @staticmethod
175
+ def _remove_tempdir(tempdir):
176
+ try:
177
+ rmtree(tempdir)
178
+ except Exception:
179
+ pass
180
+
181
+ def _mkcache(self):
182
+ os.makedirs(self.storage[-1], exist_ok=True)
183
+
184
+ def cache_size(self):
185
+ """Return size of cache in bytes.
186
+
187
+ If more than one cache directory is in use, only the size of the last
188
+ one (the writable cache directory) is returned.
189
+ """
190
+ if self._cache_size is None:
191
+ cache_dir = self.storage[-1]
192
+ self._cache_size = filesystem("file").du(cache_dir, withdirs=True)
193
+ return self._cache_size
194
+
195
+ def load_cache(self):
196
+ """Read set of stored blocks from file"""
197
+ self._metadata.load()
198
+ self._mkcache()
199
+ self.last_cache = time.time()
200
+
201
+ def save_cache(self):
202
+ """Save set of stored blocks from file"""
203
+ self._mkcache()
204
+ self._metadata.save()
205
+ self.last_cache = time.time()
206
+ self._cache_size = None
207
+
208
+ def _check_cache(self):
209
+ """Reload caches if time elapsed or any disappeared"""
210
+ self._mkcache()
211
+ if not self.cache_check:
212
+ # explicitly told not to bother checking
213
+ return
214
+ timecond = time.time() - self.last_cache > self.cache_check
215
+ existcond = all(os.path.exists(storage) for storage in self.storage)
216
+ if timecond or not existcond:
217
+ self.load_cache()
218
+
219
+ def _check_file(self, path):
220
+ """Is path in cache and still valid"""
221
+ path = self._strip_protocol(path)
222
+ self._check_cache()
223
+ return self._metadata.check_file(path, self)
224
+
225
+ def clear_cache(self):
226
+ """Remove all files and metadata from the cache
227
+
228
+ In the case of multiple cache locations, this clears only the last one,
229
+ which is assumed to be the read/write one.
230
+ """
231
+ rmtree(self.storage[-1])
232
+ self.load_cache()
233
+ self._cache_size = None
234
+
235
+ def clear_expired_cache(self, expiry_time=None):
236
+ """Remove all expired files and metadata from the cache
237
+
238
+ In the case of multiple cache locations, this clears only the last one,
239
+ which is assumed to be the read/write one.
240
+
241
+ Parameters
242
+ ----------
243
+ expiry_time: int
244
+ The time in seconds after which a local copy is considered useless.
245
+ If not defined the default is equivalent to the attribute from the
246
+ file caching instantiation.
247
+ """
248
+
249
+ if not expiry_time:
250
+ expiry_time = self.expiry
251
+
252
+ self._check_cache()
253
+
254
+ expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time)
255
+ for fn in expired_files:
256
+ if os.path.exists(fn):
257
+ os.remove(fn)
258
+
259
+ if writable_cache_empty:
260
+ rmtree(self.storage[-1])
261
+ self.load_cache()
262
+
263
+ self._cache_size = None
264
+
265
+ def pop_from_cache(self, path):
266
+ """Remove cached version of given file
267
+
268
+ Deletes local copy of the given (remote) path. If it is found in a cache
269
+ location which is not the last, it is assumed to be read-only, and
270
+ raises PermissionError
271
+ """
272
+ path = self._strip_protocol(path)
273
+ fn = self._metadata.pop_file(path)
274
+ if fn is not None:
275
+ os.remove(fn)
276
+ self._cache_size = None
277
+
278
+ def _open(
279
+ self,
280
+ path,
281
+ mode="rb",
282
+ block_size=None,
283
+ autocommit=True,
284
+ cache_options=None,
285
+ **kwargs,
286
+ ):
287
+ """Wrap the target _open
288
+
289
+ If the whole file exists in the cache, just open it locally and
290
+ return that.
291
+
292
+ Otherwise, open the file on the target FS, and make it have a mmap
293
+ cache pointing to the location which we determine, in our cache.
294
+ The ``blocks`` instance is shared, so as the mmap cache instance
295
+ updates, so does the entry in our ``cached_files`` attribute.
296
+ We monkey-patch this file, so that when it closes, we call
297
+ ``close_and_update`` to save the state of the blocks.
298
+ """
299
+ path = self._strip_protocol(path)
300
+
301
+ path = self.fs._strip_protocol(path)
302
+ if "r" not in mode:
303
+ return self.fs._open(
304
+ path,
305
+ mode=mode,
306
+ block_size=block_size,
307
+ autocommit=autocommit,
308
+ cache_options=cache_options,
309
+ **kwargs,
310
+ )
311
+ detail = self._check_file(path)
312
+ if detail:
313
+ # file is in cache
314
+ detail, fn = detail
315
+ hash, blocks = detail["fn"], detail["blocks"]
316
+ if blocks is True:
317
+ # stored file is complete
318
+ logger.debug("Opening local copy of %s", path)
319
+ return open(fn, mode)
320
+ # TODO: action where partial file exists in read-only cache
321
+ logger.debug("Opening partially cached copy of %s", path)
322
+ else:
323
+ hash = self._mapper(path)
324
+ fn = os.path.join(self.storage[-1], hash)
325
+ blocks = set()
326
+ detail = {
327
+ "original": path,
328
+ "fn": hash,
329
+ "blocks": blocks,
330
+ "time": time.time(),
331
+ "uid": self.fs.ukey(path),
332
+ }
333
+ self._metadata.update_file(path, detail)
334
+ logger.debug("Creating local sparse file for %s", path)
335
+
336
+ # call target filesystems open
337
+ self._mkcache()
338
+ f = self.fs._open(
339
+ path,
340
+ mode=mode,
341
+ block_size=block_size,
342
+ autocommit=autocommit,
343
+ cache_options=cache_options,
344
+ cache_type="none",
345
+ **kwargs,
346
+ )
347
+ if self.compression:
348
+ comp = (
349
+ infer_compression(path)
350
+ if self.compression == "infer"
351
+ else self.compression
352
+ )
353
+ f = compr[comp](f, mode="rb")
354
+ if "blocksize" in detail:
355
+ if detail["blocksize"] != f.blocksize:
356
+ raise BlocksizeMismatchError(
357
+ f"Cached file must be reopened with same block"
358
+ f" size as original (old: {detail['blocksize']},"
359
+ f" new {f.blocksize})"
360
+ )
361
+ else:
362
+ detail["blocksize"] = f.blocksize
363
+ f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks)
364
+ close = f.close
365
+ f.close = lambda: self.close_and_update(f, close)
366
+ self.save_cache()
367
+ return f
368
+
369
+ def _parent(self, path):
370
+ return self.fs._parent(path)
371
+
372
+ def hash_name(self, path: str, *args: Any) -> str:
373
+ # Kept for backward compatibility with downstream libraries.
374
+ # Ignores extra arguments, previously same_name boolean.
375
+ return self._mapper(path)
376
+
377
+ def close_and_update(self, f, close):
378
+ """Called when a file is closing, so store the set of blocks"""
379
+ if f.closed:
380
+ return
381
+ path = self._strip_protocol(f.path)
382
+ self._metadata.on_close_cached_file(f, path)
383
+ try:
384
+ logger.debug("going to save")
385
+ self.save_cache()
386
+ logger.debug("saved")
387
+ except OSError:
388
+ logger.debug("Cache saving failed while closing file")
389
+ except NameError:
390
+ logger.debug("Cache save failed due to interpreter shutdown")
391
+ close()
392
+ f.closed = True
393
+
394
+ def __getattribute__(self, item):
395
+ if item in [
396
+ "load_cache",
397
+ "_open",
398
+ "save_cache",
399
+ "close_and_update",
400
+ "__init__",
401
+ "__getattribute__",
402
+ "__reduce__",
403
+ "_make_local_details",
404
+ "open",
405
+ "cat",
406
+ "cat_file",
407
+ "cat_ranges",
408
+ "get",
409
+ "read_block",
410
+ "tail",
411
+ "head",
412
+ "_check_file",
413
+ "_check_cache",
414
+ "_mkcache",
415
+ "clear_cache",
416
+ "clear_expired_cache",
417
+ "pop_from_cache",
418
+ "_mkcache",
419
+ "local_file",
420
+ "_paths_from_path",
421
+ "get_mapper",
422
+ "open_many",
423
+ "commit_many",
424
+ "hash_name",
425
+ "__hash__",
426
+ "__eq__",
427
+ "to_json",
428
+ "cache_size",
429
+ "pipe_file",
430
+ "pipe",
431
+ "start_transaction",
432
+ "end_transaction",
433
+ ]:
434
+ # all the methods defined in this class. Note `open` here, since
435
+ # it calls `_open`, but is actually in superclass
436
+ return lambda *args, **kw: getattr(type(self), item).__get__(self)(
437
+ *args, **kw
438
+ )
439
+ if item in ["__reduce_ex__"]:
440
+ raise AttributeError
441
+ if item in ["transaction"]:
442
+ # property
443
+ return type(self).transaction.__get__(self)
444
+ if item in ["_cache", "transaction_type"]:
445
+ # class attributes
446
+ return getattr(type(self), item)
447
+ if item == "__class__":
448
+ return type(self)
449
+ d = object.__getattribute__(self, "__dict__")
450
+ fs = d.get("fs", None) # fs is not immediately defined
451
+ if item in d:
452
+ return d[item]
453
+ elif fs is not None:
454
+ if item in fs.__dict__:
455
+ # attribute of instance
456
+ return fs.__dict__[item]
457
+ # attributed belonging to the target filesystem
458
+ cls = type(fs)
459
+ m = getattr(cls, item)
460
+ if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and (
461
+ not hasattr(m, "__self__") or m.__self__ is None
462
+ ):
463
+ # instance method
464
+ return m.__get__(fs, cls)
465
+ return m # class method or attribute
466
+ else:
467
+ # attributes of the superclass, while target is being set up
468
+ return super().__getattribute__(item)
469
+
470
+ def __eq__(self, other):
471
+ """Test for equality."""
472
+ if self is other:
473
+ return True
474
+ if not isinstance(other, type(self)):
475
+ return False
476
+ return (
477
+ self.storage == other.storage
478
+ and self.kwargs == other.kwargs
479
+ and self.cache_check == other.cache_check
480
+ and self.check_files == other.check_files
481
+ and self.expiry == other.expiry
482
+ and self.compression == other.compression
483
+ and self._mapper == other._mapper
484
+ and self.target_protocol == other.target_protocol
485
+ )
486
+
487
+ def __hash__(self):
488
+ """Calculate hash."""
489
+ return (
490
+ hash(tuple(self.storage))
491
+ ^ hash(str(self.kwargs))
492
+ ^ hash(self.cache_check)
493
+ ^ hash(self.check_files)
494
+ ^ hash(self.expiry)
495
+ ^ hash(self.compression)
496
+ ^ hash(self._mapper)
497
+ ^ hash(self.target_protocol)
498
+ )
499
+
500
+ def to_json(self):
501
+ """Calculate JSON representation.
502
+
503
+ Not implemented yet for CachingFileSystem.
504
+ """
505
+ raise NotImplementedError(
506
+ "CachingFileSystem JSON representation not implemented"
507
+ )
508
+
509
+
510
+ class WholeFileCacheFileSystem(CachingFileSystem):
511
+ """Caches whole remote files on first access
512
+
513
+ This class is intended as a layer over any other file system, and
514
+ will make a local copy of each file accessed, so that all subsequent
515
+ reads are local. This is similar to ``CachingFileSystem``, but without
516
+ the block-wise functionality and so can work even when sparse files
517
+ are not allowed. See its docstring for definition of the init
518
+ arguments.
519
+
520
+ The class still needs access to the remote store for listing files,
521
+ and may refresh cached files.
522
+ """
523
+
524
+ protocol = "filecache"
525
+ local_file = True
526
+
527
+ def open_many(self, open_files, **kwargs):
528
+ paths = [of.path for of in open_files]
529
+ if "r" in open_files.mode:
530
+ self._mkcache()
531
+ else:
532
+ return [
533
+ LocalTempFile(
534
+ self.fs,
535
+ path,
536
+ mode=open_files.mode,
537
+ fn=os.path.join(self.storage[-1], self._mapper(path)),
538
+ **kwargs,
539
+ )
540
+ for path in paths
541
+ ]
542
+
543
+ if self.compression:
544
+ raise NotImplementedError
545
+ details = [self._check_file(sp) for sp in paths]
546
+ downpath = [p for p, d in zip(paths, details) if not d]
547
+ downfn0 = [
548
+ os.path.join(self.storage[-1], self._mapper(p))
549
+ for p, d in zip(paths, details)
550
+ ] # keep these path names for opening later
551
+ downfn = [fn for fn, d in zip(downfn0, details) if not d]
552
+ if downpath:
553
+ # skip if all files are already cached and up to date
554
+ self.fs.get(downpath, downfn)
555
+
556
+ # update metadata - only happens when downloads are successful
557
+ newdetail = [
558
+ {
559
+ "original": path,
560
+ "fn": self._mapper(path),
561
+ "blocks": True,
562
+ "time": time.time(),
563
+ "uid": self.fs.ukey(path),
564
+ }
565
+ for path in downpath
566
+ ]
567
+ for path, detail in zip(downpath, newdetail):
568
+ self._metadata.update_file(path, detail)
569
+ self.save_cache()
570
+
571
+ def firstpart(fn):
572
+ # helper to adapt both whole-file and simple-cache
573
+ return fn[1] if isinstance(fn, tuple) else fn
574
+
575
+ return [
576
+ open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode)
577
+ for fn0, fn1 in zip(details, downfn0)
578
+ ]
579
+
580
+ def commit_many(self, open_files):
581
+ self.fs.put([f.fn for f in open_files], [f.path for f in open_files])
582
+ [f.close() for f in open_files]
583
+ for f in open_files:
584
+ # in case autocommit is off, and so close did not already delete
585
+ try:
586
+ os.remove(f.name)
587
+ except FileNotFoundError:
588
+ pass
589
+ self._cache_size = None
590
+
591
+ def _make_local_details(self, path):
592
+ hash = self._mapper(path)
593
+ fn = os.path.join(self.storage[-1], hash)
594
+ detail = {
595
+ "original": path,
596
+ "fn": hash,
597
+ "blocks": True,
598
+ "time": time.time(),
599
+ "uid": self.fs.ukey(path),
600
+ }
601
+ self._metadata.update_file(path, detail)
602
+ logger.debug("Copying %s to local cache", path)
603
+ return fn
604
+
605
+ def cat(
606
+ self,
607
+ path,
608
+ recursive=False,
609
+ on_error="raise",
610
+ callback=DEFAULT_CALLBACK,
611
+ **kwargs,
612
+ ):
613
+ paths = self.expand_path(
614
+ path, recursive=recursive, maxdepth=kwargs.get("maxdepth", None)
615
+ )
616
+ getpaths = []
617
+ storepaths = []
618
+ fns = []
619
+ out = {}
620
+ for p in paths.copy():
621
+ try:
622
+ detail = self._check_file(p)
623
+ if not detail:
624
+ fn = self._make_local_details(p)
625
+ getpaths.append(p)
626
+ storepaths.append(fn)
627
+ else:
628
+ detail, fn = detail if isinstance(detail, tuple) else (None, detail)
629
+ fns.append(fn)
630
+ except Exception as e:
631
+ if on_error == "raise":
632
+ raise
633
+ if on_error == "return":
634
+ out[p] = e
635
+ paths.remove(p)
636
+
637
+ if getpaths:
638
+ self.fs.get(getpaths, storepaths)
639
+ self.save_cache()
640
+
641
+ callback.set_size(len(paths))
642
+ for p, fn in zip(paths, fns):
643
+ with open(fn, "rb") as f:
644
+ out[p] = f.read()
645
+ callback.relative_update(1)
646
+ if isinstance(path, str) and len(paths) == 1 and recursive is False:
647
+ out = out[paths[0]]
648
+ return out
649
+
650
+ def _open(self, path, mode="rb", **kwargs):
651
+ path = self._strip_protocol(path)
652
+ if "r" not in mode:
653
+ fn = self._make_local_details(path)
654
+ user_specified_kwargs = {
655
+ k: v
656
+ for k, v in kwargs.items()
657
+ # those kwargs were added by open(), we don't want them
658
+ if k not in ["autocommit", "block_size", "cache_options"]
659
+ }
660
+ return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
661
+ detail = self._check_file(path)
662
+ if detail:
663
+ detail, fn = detail
664
+ _, blocks = detail["fn"], detail["blocks"]
665
+ if blocks is True:
666
+ logger.debug("Opening local copy of %s", path)
667
+
668
+ # In order to support downstream filesystems to be able to
669
+ # infer the compression from the original filename, like
670
+ # the `TarFileSystem`, let's extend the `io.BufferedReader`
671
+ # fileobject protocol by adding a dedicated attribute
672
+ # `original`.
673
+ f = open(fn, mode)
674
+ f.original = detail.get("original")
675
+ return f
676
+ else:
677
+ raise ValueError(
678
+ f"Attempt to open partially cached file {path}"
679
+ f" as a wholly cached file"
680
+ )
681
+ else:
682
+ fn = self._make_local_details(path)
683
+ kwargs["mode"] = mode
684
+
685
+ # call target filesystems open
686
+ self._mkcache()
687
+ if self.compression:
688
+ with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
689
+ if isinstance(f, AbstractBufferedFile):
690
+ # want no type of caching if just downloading whole thing
691
+ f.cache = BaseCache(0, f.cache.fetcher, f.size)
692
+ comp = (
693
+ infer_compression(path)
694
+ if self.compression == "infer"
695
+ else self.compression
696
+ )
697
+ f = compr[comp](f, mode="rb")
698
+ data = True
699
+ while data:
700
+ block = getattr(f, "blocksize", 5 * 2**20)
701
+ data = f.read(block)
702
+ f2.write(data)
703
+ else:
704
+ self.fs.get_file(path, fn)
705
+ self.save_cache()
706
+ return self._open(path, mode)
707
+
708
+
709
+ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
710
+ """Caches whole remote files on first access
711
+
712
+ This class is intended as a layer over any other file system, and
713
+ will make a local copy of each file accessed, so that all subsequent
714
+ reads are local. This implementation only copies whole files, and
715
+ does not keep any metadata about the download time or file details.
716
+ It is therefore safer to use in multi-threaded/concurrent situations.
717
+
718
+ This is the only of the caching filesystems that supports write: you will
719
+ be given a real local open file, and upon close and commit, it will be
720
+ uploaded to the target filesystem; the writability or the target URL is
721
+ not checked until that time.
722
+
723
+ """
724
+
725
+ protocol = "simplecache"
726
+ local_file = True
727
+ transaction_type = WriteCachedTransaction
728
+
729
+ def __init__(self, **kwargs):
730
+ kw = kwargs.copy()
731
+ for key in ["cache_check", "expiry_time", "check_files"]:
732
+ kw[key] = False
733
+ super().__init__(**kw)
734
+ for storage in self.storage:
735
+ if not os.path.exists(storage):
736
+ os.makedirs(storage, exist_ok=True)
737
+
738
+ def _check_file(self, path):
739
+ self._check_cache()
740
+ sha = self._mapper(path)
741
+ for storage in self.storage:
742
+ fn = os.path.join(storage, sha)
743
+ if os.path.exists(fn):
744
+ return fn
745
+
746
+ def save_cache(self):
747
+ pass
748
+
749
+ def load_cache(self):
750
+ pass
751
+
752
+ def pipe_file(self, path, value=None, **kwargs):
753
+ if self._intrans:
754
+ with self.open(path, "wb") as f:
755
+ f.write(value)
756
+ else:
757
+ super().pipe_file(path, value)
758
+
759
+ def pipe(self, path, value=None, **kwargs):
760
+ if isinstance(path, str):
761
+ self.pipe_file(self._strip_protocol(path), value, **kwargs)
762
+ elif isinstance(path, dict):
763
+ for k, v in path.items():
764
+ self.pipe_file(self._strip_protocol(k), v, **kwargs)
765
+ else:
766
+ raise ValueError("path must be str or dict")
767
+
768
+ def cat_ranges(
769
+ self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
770
+ ):
771
+ lpaths = [self._check_file(p) for p in paths]
772
+ rpaths = [p for l, p in zip(lpaths, paths) if l is False]
773
+ lpaths = [l for l, p in zip(lpaths, paths) if l is False]
774
+ self.fs.get(rpaths, lpaths)
775
+ return super().cat_ranges(
776
+ paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
777
+ )
778
+
779
+ def _open(self, path, mode="rb", **kwargs):
780
+ path = self._strip_protocol(path)
781
+ sha = self._mapper(path)
782
+
783
+ if "r" not in mode:
784
+ fn = os.path.join(self.storage[-1], sha)
785
+ user_specified_kwargs = {
786
+ k: v
787
+ for k, v in kwargs.items()
788
+ if k not in ["autocommit", "block_size", "cache_options"]
789
+ } # those were added by open()
790
+ return LocalTempFile(
791
+ self,
792
+ path,
793
+ mode=mode,
794
+ autocommit=not self._intrans,
795
+ fn=fn,
796
+ **user_specified_kwargs,
797
+ )
798
+ fn = self._check_file(path)
799
+ if fn:
800
+ return open(fn, mode)
801
+
802
+ fn = os.path.join(self.storage[-1], sha)
803
+ logger.debug("Copying %s to local cache", path)
804
+ kwargs["mode"] = mode
805
+
806
+ self._mkcache()
807
+ self._cache_size = None
808
+ if self.compression:
809
+ with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
810
+ if isinstance(f, AbstractBufferedFile):
811
+ # want no type of caching if just downloading whole thing
812
+ f.cache = BaseCache(0, f.cache.fetcher, f.size)
813
+ comp = (
814
+ infer_compression(path)
815
+ if self.compression == "infer"
816
+ else self.compression
817
+ )
818
+ f = compr[comp](f, mode="rb")
819
+ data = True
820
+ while data:
821
+ block = getattr(f, "blocksize", 5 * 2**20)
822
+ data = f.read(block)
823
+ f2.write(data)
824
+ else:
825
+ self.fs.get_file(path, fn)
826
+ return self._open(path, mode)
827
+
828
+
829
+ class LocalTempFile:
830
+ """A temporary local file, which will be uploaded on commit"""
831
+
832
+ def __init__(self, fs, path, fn, mode="wb", autocommit=True, seek=0, **kwargs):
833
+ self.fn = fn
834
+ self.fh = open(fn, mode)
835
+ self.mode = mode
836
+ if seek:
837
+ self.fh.seek(seek)
838
+ self.path = path
839
+ self.fs = fs
840
+ self.closed = False
841
+ self.autocommit = autocommit
842
+ self.kwargs = kwargs
843
+
844
+ def __reduce__(self):
845
+ # always open in r+b to allow continuing writing at a location
846
+ return (
847
+ LocalTempFile,
848
+ (self.fs, self.path, self.fn, "r+b", self.autocommit, self.tell()),
849
+ )
850
+
851
+ def __enter__(self):
852
+ return self.fh
853
+
854
+ def __exit__(self, exc_type, exc_val, exc_tb):
855
+ self.close()
856
+
857
+ def close(self):
858
+ if self.closed:
859
+ return
860
+ self.fh.close()
861
+ self.closed = True
862
+ if self.autocommit:
863
+ self.commit()
864
+
865
+ def discard(self):
866
+ self.fh.close()
867
+ os.remove(self.fn)
868
+
869
+ def commit(self):
870
+ self.fs.put(self.fn, self.path, **self.kwargs)
871
+ try:
872
+ os.remove(self.fn)
873
+ except (PermissionError, FileNotFoundError):
874
+ # file path may be held by new version of the file on windows
875
+ pass
876
+
877
+ @property
878
+ def name(self):
879
+ return self.fn
880
+
881
+ def __getattr__(self, item):
882
+ return getattr(self.fh, item)
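A brief usage sketch for the caching layers above (illustrative only, not part of the added file); the URL and cache directory are placeholders, and it assumes the usual fsspec registry names and an installed HTTP backend:

import fsspec

# "simplecache::" chains SimpleCacheFileSystem in front of the target protocol;
# the whole remote file is downloaded on first access and reused afterwards.
with fsspec.open(
    "simplecache::https://example.com/data.csv",         # placeholder URL
    mode="rb",
    simplecache={"cache_storage": "/tmp/fsspec-cache"},   # placeholder cache dir
) as f:
    header = f.read(1024)  # served from the local copy

# The same layer instantiated directly; "filecache" (WholeFileCacheFileSystem)
# would additionally persist metadata and honour expiry_time / check_files.
fs = fsspec.filesystem(
    "simplecache",
    target_protocol="https",
    cache_storage="/tmp/fsspec-cache",
)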
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
+ def _get_client(client):
11
+ if client is None:
12
+ return _get_global_client()
13
+ elif isinstance(client, Client):
14
+ return client
15
+ else:
16
+ # e.g., connection string
17
+ return Client(client)
18
+
19
+
20
+ def _in_worker():
21
+ return bool(Worker._instances)
22
+
23
+
24
+ class DaskWorkerFileSystem(AbstractFileSystem):
25
+ """View files accessible to a worker as any other remote file-system
26
+
27
+ When instances are run on the worker, uses the real filesystem. When
28
+ run on the client, they call the worker to provide information or data.
29
+
30
+ **Warning** this implementation is experimental, and read-only for now.
31
+ """
32
+
33
+ def __init__(
34
+ self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
35
+ ):
36
+ super().__init__(**kwargs)
37
+ if not (fs is None) ^ (target_protocol is None):
38
+ raise ValueError(
39
+ "Please provide one of filesystem instance (fs) or"
40
+ " target_protocol, not both"
41
+ )
42
+ self.target_protocol = target_protocol
43
+ self.target_options = target_options
44
+ self.worker = None
45
+ self.client = client
46
+ self.fs = fs
47
+ self._determine_worker()
48
+
49
+ @staticmethod
50
+ def _get_kwargs_from_urls(path):
51
+ so = infer_storage_options(path)
52
+ if "host" in so and "port" in so:
53
+ return {"client": f"{so['host']}:{so['port']}"}
54
+ else:
55
+ return {}
56
+
57
+ def _determine_worker(self):
58
+ if _in_worker():
59
+ self.worker = True
60
+ if self.fs is None:
61
+ self.fs = filesystem(
62
+ self.target_protocol, **(self.target_options or {})
63
+ )
64
+ else:
65
+ self.worker = False
66
+ self.client = _get_client(self.client)
67
+ self.rfs = dask.delayed(self)
68
+
69
+ def mkdir(self, *args, **kwargs):
70
+ if self.worker:
71
+ self.fs.mkdir(*args, **kwargs)
72
+ else:
73
+ self.rfs.mkdir(*args, **kwargs).compute()
74
+
75
+ def rm(self, *args, **kwargs):
76
+ if self.worker:
77
+ self.fs.rm(*args, **kwargs)
78
+ else:
79
+ self.rfs.rm(*args, **kwargs).compute()
80
+
81
+ def copy(self, *args, **kwargs):
82
+ if self.worker:
83
+ self.fs.copy(*args, **kwargs)
84
+ else:
85
+ self.rfs.copy(*args, **kwargs).compute()
86
+
87
+ def mv(self, *args, **kwargs):
88
+ if self.worker:
89
+ self.fs.mv(*args, **kwargs)
90
+ else:
91
+ self.rfs.mv(*args, **kwargs).compute()
92
+
93
+ def ls(self, *args, **kwargs):
94
+ if self.worker:
95
+ return self.fs.ls(*args, **kwargs)
96
+ else:
97
+ return self.rfs.ls(*args, **kwargs).compute()
98
+
99
+ def _open(
100
+ self,
101
+ path,
102
+ mode="rb",
103
+ block_size=None,
104
+ autocommit=True,
105
+ cache_options=None,
106
+ **kwargs,
107
+ ):
108
+ if self.worker:
109
+ return self.fs._open(
110
+ path,
111
+ mode=mode,
112
+ block_size=block_size,
113
+ autocommit=autocommit,
114
+ cache_options=cache_options,
115
+ **kwargs,
116
+ )
117
+ else:
118
+ return DaskFile(
119
+ fs=self,
120
+ path=path,
121
+ mode=mode,
122
+ block_size=block_size,
123
+ autocommit=autocommit,
124
+ cache_options=cache_options,
125
+ **kwargs,
126
+ )
127
+
128
+ def fetch_range(self, path, mode, start, end):
129
+ if self.worker:
130
+ with self._open(path, mode) as f:
131
+ f.seek(start)
132
+ return f.read(end - start)
133
+ else:
134
+ return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
+ class DaskFile(AbstractBufferedFile):
138
+ def __init__(self, mode="rb", **kwargs):
139
+ if mode != "rb":
140
+ raise ValueError('Remote dask files can only be opened in "rb" mode')
141
+ super().__init__(**kwargs)
142
+
143
+ def _upload_chunk(self, final=False):
144
+ pass
145
+
146
+ def _initiate_upload(self):
147
+ """Create remote file/upload"""
148
+ pass
149
+
150
+ def _fetch_range(self, start, end):
151
+ """Get the specified set of bytes from remote"""
152
+ return self.fs.fetch_range(self.path, self.mode, start, end)
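A hedged client-side sketch of DaskWorkerFileSystem (not part of the added file); the scheduler address, bucket and key are placeholders, the "dask" registry name is assumed, and the workers are assumed able to instantiate the target "s3" filesystem themselves:

import fsspec
from distributed import Client

client = Client("tcp://127.0.0.1:8786")   # placeholder scheduler address
fs = fsspec.filesystem(
    "dask",                               # assumed registry name for DaskWorkerFileSystem
    target_protocol="s3",
    target_options={"anon": True},
    client=client,
)

print(fs.ls("my-bucket/data"))            # executed on a worker, result shipped back
with fs.open("my-bucket/data/part-0.bin", "rb") as f:
    head = f.read(1024)                   # bytes fetched via fetch_range on the worker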
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/dbfs.py ADDED
@@ -0,0 +1,467 @@
1
+ import base64
2
+ import urllib
3
+
4
+ import requests
5
+ import requests.exceptions
6
+ from requests.adapters import HTTPAdapter, Retry
7
+
8
+ from fsspec import AbstractFileSystem
9
+ from fsspec.spec import AbstractBufferedFile
10
+
11
+
12
+ class DatabricksException(Exception):
13
+ """
14
+ Helper class for exceptions raised in this module.
15
+ """
16
+
17
+ def __init__(self, error_code, message):
18
+ """Create a new DatabricksException"""
19
+ super().__init__(message)
20
+
21
+ self.error_code = error_code
22
+ self.message = message
23
+
24
+
25
+ class DatabricksFileSystem(AbstractFileSystem):
26
+ """
27
+ Get access to the Databricks filesystem implementation over HTTP.
28
+ Can be used inside and outside of a databricks cluster.
29
+ """
30
+
31
+ def __init__(self, instance, token, **kwargs):
32
+ """
33
+ Create a new DatabricksFileSystem.
34
+
35
+ Parameters
36
+ ----------
37
+ instance: str
38
+ The instance URL of the databricks cluster.
39
+ For example for an Azure databricks cluster, this
40
+ has the form adb-<some-number>.<two digits>.azuredatabricks.net.
41
+ token: str
42
+ Your personal token. Find out more
43
+ here: https://docs.databricks.com/dev-tools/api/latest/authentication.html
44
+ """
45
+ self.instance = instance
46
+ self.token = token
47
+ self.session = requests.Session()
48
+ self.retries = Retry(
49
+ total=10,
50
+ backoff_factor=0.05,
51
+ status_forcelist=[408, 429, 500, 502, 503, 504],
52
+ )
53
+
54
+ self.session.mount("https://", HTTPAdapter(max_retries=self.retries))
55
+ self.session.headers.update({"Authorization": f"Bearer {self.token}"})
56
+
57
+ super().__init__(**kwargs)
58
+
59
+ def ls(self, path, detail=True, **kwargs):
60
+ """
61
+ List the contents of the given path.
62
+
63
+ Parameters
64
+ ----------
65
+ path: str
66
+ Absolute path
67
+ detail: bool
68
+ Return not only the list of filenames,
69
+ but also additional information on file sizes
70
+ and types.
71
+ """
72
+ out = self._ls_from_cache(path)
73
+ if not out:
74
+ try:
75
+ r = self._send_to_api(
76
+ method="get", endpoint="list", json={"path": path}
77
+ )
78
+ except DatabricksException as e:
79
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
80
+ raise FileNotFoundError(e.message)
81
+
82
+ raise e
83
+ files = r["files"]
84
+ out = [
85
+ {
86
+ "name": o["path"],
87
+ "type": "directory" if o["is_dir"] else "file",
88
+ "size": o["file_size"],
89
+ }
90
+ for o in files
91
+ ]
92
+ self.dircache[path] = out
93
+
94
+ if detail:
95
+ return out
96
+ return [o["name"] for o in out]
97
+
98
+ def makedirs(self, path, exist_ok=True):
99
+ """
100
+ Create a given absolute path and all of its parents.
101
+
102
+ Parameters
103
+ ----------
104
+ path: str
105
+ Absolute path to create
106
+ exist_ok: bool
107
+ If false, checks if the folder
108
+ exists before creating it (and raises an
109
+ Exception if this is the case)
110
+ """
111
+ if not exist_ok:
112
+ try:
113
+ # If the following succeeds, the path is already present
114
+ self._send_to_api(
115
+ method="get", endpoint="get-status", json={"path": path}
116
+ )
117
+ raise FileExistsError(f"Path {path} already exists")
118
+ except DatabricksException as e:
119
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
120
+ pass
121
+
122
+ try:
123
+ self._send_to_api(method="post", endpoint="mkdirs", json={"path": path})
124
+ except DatabricksException as e:
125
+ if e.error_code == "RESOURCE_ALREADY_EXISTS":
126
+ raise FileExistsError(e.message)
127
+
128
+ raise e
129
+ self.invalidate_cache(self._parent(path))
130
+
131
+ def mkdir(self, path, create_parents=True, **kwargs):
132
+ """
133
+ Create a given absolute path and all of its parents.
134
+
135
+ Parameters
136
+ ----------
137
+ path: str
138
+ Absolute path to create
139
+ create_parents: bool
140
+ Whether to create all parents or not.
141
+ "False" is not implemented so far.
142
+ """
143
+ if not create_parents:
144
+ raise NotImplementedError
145
+
146
+ self.mkdirs(path, **kwargs)
147
+
148
+ def rm(self, path, recursive=False, **kwargs):
149
+ """
150
+ Remove the file or folder at the given absolute path.
151
+
152
+ Parameters
153
+ ----------
154
+ path: str
155
+ Absolute path what to remove
156
+ recursive: bool
157
+ Recursively delete all files in a folder.
158
+ """
159
+ try:
160
+ self._send_to_api(
161
+ method="post",
162
+ endpoint="delete",
163
+ json={"path": path, "recursive": recursive},
164
+ )
165
+ except DatabricksException as e:
166
+ # This is not really an exception, it just means
167
+ # not everything was deleted so far
168
+ if e.error_code == "PARTIAL_DELETE":
169
+ self.rm(path=path, recursive=recursive)
170
+ elif e.error_code == "IO_ERROR":
171
+ # Using the same exception as the os module would use here
172
+ raise OSError(e.message)
173
+
174
+ raise e
175
+ self.invalidate_cache(self._parent(path))
176
+
177
+ def mv(
178
+ self, source_path, destination_path, recursive=False, maxdepth=None, **kwargs
179
+ ):
180
+ """
181
+ Move a source to a destination path.
182
+
183
+ A note from the original [databricks API manual]
184
+ (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move).
185
+
186
+ When moving a large number of files the API call will time out after
187
+ approximately 60s, potentially resulting in partially moved data.
188
+ Therefore, for operations that move more than 10k files, we strongly
189
+ discourage using the DBFS REST API.
190
+
191
+ Parameters
192
+ ----------
193
+ source_path: str
194
+ From where to move (absolute path)
195
+ destination_path: str
196
+ To where to move (absolute path)
197
+ recursive: bool
198
+ Not implemented so far.
199
+ maxdepth:
200
+ Not implemented so far.
201
+ """
202
+ if recursive:
203
+ raise NotImplementedError
204
+ if maxdepth:
205
+ raise NotImplementedError
206
+
207
+ try:
208
+ self._send_to_api(
209
+ method="post",
210
+ endpoint="move",
211
+ json={"source_path": source_path, "destination_path": destination_path},
212
+ )
213
+ except DatabricksException as e:
214
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
215
+ raise FileNotFoundError(e.message)
216
+ elif e.error_code == "RESOURCE_ALREADY_EXISTS":
217
+ raise FileExistsError(e.message)
218
+
219
+ raise e
220
+ self.invalidate_cache(self._parent(source_path))
221
+ self.invalidate_cache(self._parent(destination_path))
222
+
223
+ def _open(self, path, mode="rb", block_size="default", **kwargs):
224
+ """
225
+ Overwrite the base class method to make sure to create a DBFile.
226
+ All arguments are copied from the base method.
227
+
228
+ Only the default blocksize is allowed.
229
+ """
230
+ return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs)
231
+
232
+ def _send_to_api(self, method, endpoint, json):
233
+ """
234
+ Send the given json to the DBFS API
235
+ using a get or post request (specified by the argument `method`).
236
+
237
+ Parameters
238
+ ----------
239
+ method: str
240
+ Which http method to use for communication; "get" or "post".
241
+ endpoint: str
242
+ Where to send the request to (last part of the API URL)
243
+ json: dict
244
+ Dictionary of information to send
245
+ """
246
+ if method == "post":
247
+ session_call = self.session.post
248
+ elif method == "get":
249
+ session_call = self.session.get
250
+ else:
251
+ raise ValueError(f"Do not understand method {method}")
252
+
253
+ url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint)
254
+
255
+ r = session_call(url, json=json)
256
+
257
+ # The DBFS API will return JSON, even in the case of an exception.
258
+ # We want to preserve this information as well as possible.
259
+ try:
260
+ r.raise_for_status()
261
+ except requests.HTTPError as e:
262
+ # try to extract json error message
263
+ # if that fails, fall back to the original exception
264
+ try:
265
+ exception_json = e.response.json()
266
+ except Exception:
267
+ raise e
268
+
269
+ raise DatabricksException(**exception_json)
270
+
271
+ return r.json()
272
+
273
+ def _create_handle(self, path, overwrite=True):
274
+ """
275
+ Internal function to create a handle, which can be used to
276
+ write blocks of a file to DBFS.
277
+ A handle has a unique identifier which needs to be passed
278
+ whenever written during this transaction.
279
+ The handle is active for 10 minutes - after that a new
280
+ write transaction needs to be created.
281
+ Make sure to close the handle after you are finished.
282
+
283
+ Parameters
284
+ ----------
285
+ path: str
286
+ Absolute path for this file.
287
+ overwrite: bool
288
+ If a file already exist at this location, either overwrite
289
+ it or raise an exception.
290
+ """
291
+ try:
292
+ r = self._send_to_api(
293
+ method="post",
294
+ endpoint="create",
295
+ json={"path": path, "overwrite": overwrite},
296
+ )
297
+ return r["handle"]
298
+ except DatabricksException as e:
299
+ if e.error_code == "RESOURCE_ALREADY_EXISTS":
300
+ raise FileExistsError(e.message)
301
+
302
+ raise e
303
+
304
+ def _close_handle(self, handle):
305
+ """
306
+ Close a handle, which was opened by :func:`_create_handle`.
307
+
308
+ Parameters
309
+ ----------
310
+ handle: str
311
+ Which handle to close.
312
+ """
313
+ try:
314
+ self._send_to_api(method="post", endpoint="close", json={"handle": handle})
315
+ except DatabricksException as e:
316
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
317
+ raise FileNotFoundError(e.message)
318
+
319
+ raise e
320
+
321
+ def _add_data(self, handle, data):
322
+ """
323
+ Upload data to an already opened file handle
324
+ (opened by :func:`_create_handle`).
325
+ The maximal allowed data size is 1MB after
326
+ conversion to base64.
327
+ Remember to close the handle when you are finished.
328
+
329
+ Parameters
330
+ ----------
331
+ handle: str
332
+ Which handle to upload data to.
333
+ data: bytes
334
+ Block of data to add to the handle.
335
+ """
336
+ data = base64.b64encode(data).decode()
337
+ try:
338
+ self._send_to_api(
339
+ method="post",
340
+ endpoint="add-block",
341
+ json={"handle": handle, "data": data},
342
+ )
343
+ except DatabricksException as e:
344
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
345
+ raise FileNotFoundError(e.message)
346
+ elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED":
347
+ raise ValueError(e.message)
348
+
349
+ raise e
350
+
351
+ def _get_data(self, path, start, end):
352
+ """
353
+ Download data in bytes from a given absolute path in a block
354
+ from [start, start+length].
355
+ The maximum number of allowed bytes to read is 1MB.
356
+
357
+ Parameters
358
+ ----------
359
+ path: str
360
+ Absolute path to download data from
361
+ start: int
362
+ Start position of the block
363
+ end: int
364
+ End position of the block
365
+ """
366
+ try:
367
+ r = self._send_to_api(
368
+ method="get",
369
+ endpoint="read",
370
+ json={"path": path, "offset": start, "length": end - start},
371
+ )
372
+ return base64.b64decode(r["data"])
373
+ except DatabricksException as e:
374
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
375
+ raise FileNotFoundError(e.message)
376
+ elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]:
377
+ raise ValueError(e.message)
378
+
379
+ raise e
380
+
381
+ def invalidate_cache(self, path=None):
382
+ if path is None:
383
+ self.dircache.clear()
384
+ else:
385
+ self.dircache.pop(path, None)
386
+ super().invalidate_cache(path)
387
+
388
+
389
+ class DatabricksFile(AbstractBufferedFile):
390
+ """
391
+ Helper class for files referenced in the DatabricksFileSystem.
392
+ """
393
+
394
+ DEFAULT_BLOCK_SIZE = 1 * 2**20 # only allowed block size
395
+
396
+ def __init__(
397
+ self,
398
+ fs,
399
+ path,
400
+ mode="rb",
401
+ block_size="default",
402
+ autocommit=True,
403
+ cache_type="readahead",
404
+ cache_options=None,
405
+ **kwargs,
406
+ ):
407
+ """
408
+ Create a new instance of the DatabricksFile.
409
+
410
+ The blocksize needs to be the default one.
411
+ """
412
+ if block_size is None or block_size == "default":
413
+ block_size = self.DEFAULT_BLOCK_SIZE
414
+
415
+ assert (
416
+ block_size == self.DEFAULT_BLOCK_SIZE
417
+ ), f"Only the default block size is allowed, not {block_size}"
418
+
419
+ super().__init__(
420
+ fs,
421
+ path,
422
+ mode=mode,
423
+ block_size=block_size,
424
+ autocommit=autocommit,
425
+ cache_type=cache_type,
426
+ cache_options=cache_options or {},
427
+ **kwargs,
428
+ )
429
+
430
+ def _initiate_upload(self):
431
+ """Internal function to start a file upload"""
432
+ self.handle = self.fs._create_handle(self.path)
433
+
434
+ def _upload_chunk(self, final=False):
435
+ """Internal function to add a chunk of data to a started upload"""
436
+ self.buffer.seek(0)
437
+ data = self.buffer.getvalue()
438
+
439
+ data_chunks = [
440
+ data[start:end] for start, end in self._to_sized_blocks(len(data))
441
+ ]
442
+
443
+ for data_chunk in data_chunks:
444
+ self.fs._add_data(handle=self.handle, data=data_chunk)
445
+
446
+ if final:
447
+ self.fs._close_handle(handle=self.handle)
448
+ return True
449
+
450
+ def _fetch_range(self, start, end):
451
+ """Internal function to download a block of data"""
452
+ return_buffer = b""
453
+ length = end - start
454
+ for chunk_start, chunk_end in self._to_sized_blocks(length, start):
455
+ return_buffer += self.fs._get_data(
456
+ path=self.path, start=chunk_start, end=chunk_end
457
+ )
458
+
459
+ return return_buffer
460
+
461
+ def _to_sized_blocks(self, length, start=0):
462
+ """Helper function to split the range [start, start + length) into block-sized chunks"""
463
+ end = start + length
464
+ for data_chunk in range(start, end, self.blocksize):
465
+ data_start = data_chunk
466
+ data_end = min(end, data_chunk + self.blocksize)
467
+ yield data_start, data_end
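A minimal sketch of using the DBFS backend above (illustrative only); the workspace instance and token are placeholders obtained from a Databricks workspace:

import fsspec

fs = fsspec.filesystem(
    "dbfs",
    instance="adb-1234567890123456.7.azuredatabricks.net",  # placeholder instance URL
    token="dapiXXXXXXXXXXXXXXXX",                            # placeholder personal access token
)

print(fs.ls("/FileStore", detail=False))

# Writes are buffered and sent through the create / add-block / close handle
# protocol implemented by DatabricksFile, in 1 MB blocks.
with fs.open("/FileStore/fsspec-demo.txt", "wb") as f:
    f.write(b"hello from fsspec\n")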
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/git.py ADDED
@@ -0,0 +1,127 @@
1
+ import os
2
+
3
+ import pygit2
4
+
5
+ from fsspec.spec import AbstractFileSystem
6
+
7
+ from .memory import MemoryFile
8
+
9
+
10
+ class GitFileSystem(AbstractFileSystem):
11
+ """Browse the files of a local git repo at any hash/tag/branch
12
+
13
+ (experimental backend)
14
+ """
15
+
16
+ root_marker = ""
17
+ cachable = True
18
+
19
+ def __init__(self, path=None, fo=None, ref=None, **kwargs):
20
+ """
21
+
22
+ Parameters
23
+ ----------
24
+ path: str (optional)
25
+ Local location of the repo (uses current directory if not given).
26
+ May be deprecated in favour of ``fo``. When used with a higher
27
+ level function such as fsspec.open(), may be of the form
28
+ "git://[path-to-repo[:]][ref@]path/to/file" (but the actual
29
+ file path should not contain "@" or ":").
30
+ fo: str (optional)
31
+ Same as ``path``, but passed as part of a chained URL. This one
32
+ takes precedence if both are given.
33
+ ref: str (optional)
34
+ Reference to work with, could be a hash, tag or branch name. Defaults
35
+ to current working tree. Note that ``ls`` and ``open`` also take hash,
36
+ so this becomes the default for those operations
37
+ kwargs
38
+ """
39
+ super().__init__(**kwargs)
40
+ self.repo = pygit2.Repository(fo or path or os.getcwd())
41
+ self.ref = ref or "master"
42
+
43
+ @classmethod
44
+ def _strip_protocol(cls, path):
45
+ path = super()._strip_protocol(path).lstrip("/")
46
+ if ":" in path:
47
+ path = path.split(":", 1)[1]
48
+ if "@" in path:
49
+ path = path.split("@", 1)[1]
50
+ return path.lstrip("/")
51
+
52
+ def _path_to_object(self, path, ref):
53
+ comm, ref = self.repo.resolve_refish(ref or self.ref)
54
+ parts = path.split("/")
55
+ tree = comm.tree
56
+ for part in parts:
57
+ if part and isinstance(tree, pygit2.Tree):
58
+ tree = tree[part]
59
+ return tree
60
+
61
+ @staticmethod
62
+ def _get_kwargs_from_urls(path):
63
+ if path.startswith("git://"):
64
+ path = path[6:]
65
+ out = {}
66
+ if ":" in path:
67
+ out["path"], path = path.split(":", 1)
68
+ if "@" in path:
69
+ out["ref"], path = path.split("@", 1)
70
+ return out
71
+
72
+ def ls(self, path, detail=True, ref=None, **kwargs):
73
+ path = self._strip_protocol(path)
74
+ tree = self._path_to_object(path, ref)
75
+ if isinstance(tree, pygit2.Tree):
76
+ out = []
77
+ for obj in tree:
78
+ if isinstance(obj, pygit2.Tree):
79
+ out.append(
80
+ {
81
+ "type": "directory",
82
+ "name": "/".join([path, obj.name]).lstrip("/"),
83
+ "hex": obj.hex,
84
+ "mode": f"{obj.filemode:o}",
85
+ "size": 0,
86
+ }
87
+ )
88
+ else:
89
+ out.append(
90
+ {
91
+ "type": "file",
92
+ "name": "/".join([path, obj.name]).lstrip("/"),
93
+ "hex": obj.hex,
94
+ "mode": f"{obj.filemode:o}",
95
+ "size": obj.size,
96
+ }
97
+ )
98
+ else:
99
+ obj = tree
100
+ out = [
101
+ {
102
+ "type": "file",
103
+ "name": obj.name,
104
+ "hex": obj.hex,
105
+ "mode": f"{obj.filemode:o}",
106
+ "size": obj.size,
107
+ }
108
+ ]
109
+ if detail:
110
+ return out
111
+ return [o["name"] for o in out]
112
+
113
+ def ukey(self, path, ref=None):
114
+ return self.info(path, ref=ref)["hex"]
115
+
116
+ def _open(
117
+ self,
118
+ path,
119
+ mode="rb",
120
+ block_size=None,
121
+ autocommit=True,
122
+ cache_options=None,
123
+ ref=None,
124
+ **kwargs,
125
+ ):
126
+ obj = self._path_to_object(path, ref or self.ref)
127
+ return MemoryFile(data=obj.data)
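A short sketch of browsing a local git repository with the backend above (illustrative only); it requires pygit2, and the repository path, ref and file name are placeholders:

import fsspec

fs = fsspec.filesystem("git", path="/path/to/some/repo", ref="main")

print(fs.ls("", detail=False))           # entries of the tree at the chosen ref
with fs.open("README.md", "rb") as f:    # contents come back as an in-memory blob
    text = f.read().decode()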
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/github.py ADDED
@@ -0,0 +1,227 @@
1
+ import requests
2
+
3
+ from ..spec import AbstractFileSystem
4
+ from ..utils import infer_storage_options
5
+ from .memory import MemoryFile
6
+
7
+ # TODO: add GIST backend, would be very similar
8
+
9
+
10
+ class GithubFileSystem(AbstractFileSystem):
11
+ """Interface to files in github
12
+
13
+ An instance of this class provides the files residing within a remote github
14
+ repository. You may specify a point in the repo's history, by SHA, branch
15
+ or tag (defaults to the repository's default branch).
16
+
17
+ Given that code files tend to be small, and that github does not support
18
+ retrieving partial content, we always fetch whole files.
19
+
20
+ When using fsspec.open, allows URIs of the form:
21
+
22
+ - "github://path/file", in which case you must specify org, repo and
23
+ may specify sha in the extra args
24
+ - 'github://org:repo@/precip/catalog.yml', where the org and repo are
25
+ part of the URI
26
+ - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included
27
+
28
+ ``sha`` can be the full or abbreviated hex of the commit you want to fetch
29
+ from, or a branch or tag name (so long as it doesn't contain special characters
30
+ like "/", "?", which would have to be HTTP-encoded).
31
+
32
+ For authorised access, you must provide username and token, which can be made
33
+ at https://github.com/settings/tokens
34
+ """
35
+
36
+ url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}"
37
+ rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}"
38
+ protocol = "github"
39
+ timeout = (60, 60) # connect, read timeouts
40
+
41
+ def __init__(
42
+ self, org, repo, sha=None, username=None, token=None, timeout=None, **kwargs
43
+ ):
44
+ super().__init__(**kwargs)
45
+ self.org = org
46
+ self.repo = repo
47
+ if (username is None) ^ (token is None):
48
+ raise ValueError("Auth required both username and token")
49
+ self.username = username
50
+ self.token = token
51
+ if timeout is not None:
52
+ self.timeout = timeout
53
+ if sha is None:
54
+ # look up default branch (not necessarily "master")
55
+ u = "https://api.github.com/repos/{org}/{repo}"
56
+ r = requests.get(
57
+ u.format(org=org, repo=repo), timeout=self.timeout, **self.kw
58
+ )
59
+ r.raise_for_status()
60
+ sha = r.json()["default_branch"]
61
+
62
+ self.root = sha
63
+ self.ls("")
64
+
65
+ @property
66
+ def kw(self):
67
+ if self.username:
68
+ return {"auth": (self.username, self.token)}
69
+ return {}
70
+
71
+ @classmethod
72
+ def repos(cls, org_or_user, is_org=True):
73
+ """List repo names for given org or user
74
+
75
+ This may become the top level of the FS
76
+
77
+ Parameters
78
+ ----------
79
+ org_or_user: str
80
+ Name of the github org or user to query
81
+ is_org: bool (default True)
82
+ Whether the name is an organisation (True) or user (False)
83
+
84
+ Returns
85
+ -------
86
+ List of string
87
+ """
88
+ r = requests.get(
89
+ f"https://api.github.com/{['users', 'orgs'][is_org]}/{org_or_user}/repos",
90
+ timeout=cls.timeout,
91
+ )
92
+ r.raise_for_status()
93
+ return [repo["name"] for repo in r.json()]
94
+
95
+ @property
96
+ def tags(self):
97
+ """Names of tags in the repo"""
98
+ r = requests.get(
99
+ f"https://api.github.com/repos/{self.org}/{self.repo}/tags",
100
+ timeout=self.timeout,
101
+ **self.kw,
102
+ )
103
+ r.raise_for_status()
104
+ return [t["name"] for t in r.json()]
105
+
106
+ @property
107
+ def branches(self):
108
+ """Names of branches in the repo"""
109
+ r = requests.get(
110
+ f"https://api.github.com/repos/{self.org}/{self.repo}/branches",
111
+ timeout=self.timeout,
112
+ **self.kw,
113
+ )
114
+ r.raise_for_status()
115
+ return [t["name"] for t in r.json()]
116
+
117
+ @property
118
+ def refs(self):
119
+ """Named references, tags and branches"""
120
+ return {"tags": self.tags, "branches": self.branches}
121
+
122
+ def ls(self, path, detail=False, sha=None, _sha=None, **kwargs):
123
+ """List files at given path
124
+
125
+ Parameters
126
+ ----------
127
+ path: str
128
+ Location to list, relative to repo root
129
+ detail: bool
130
+ If True, returns list of dicts, one per file; if False, returns
131
+ list of full filenames only
132
+ sha: str (optional)
133
+ List at the given point in the repo history, branch or tag name or commit
134
+ SHA
135
+ _sha: str (optional)
136
+ List this specific tree object (used internally to descend into trees)
137
+ """
138
+ path = self._strip_protocol(path)
139
+ if path == "":
140
+ _sha = sha or self.root
141
+ if _sha is None:
142
+ parts = path.rstrip("/").split("/")
143
+ so_far = ""
144
+ _sha = sha or self.root
145
+ for part in parts:
146
+ out = self.ls(so_far, True, sha=sha, _sha=_sha)
147
+ so_far += "/" + part if so_far else part
148
+ out = [o for o in out if o["name"] == so_far]
149
+ if not out:
150
+ raise FileNotFoundError(path)
151
+ out = out[0]
152
+ if out["type"] == "file":
153
+ if detail:
154
+ return [out]
155
+ else:
156
+ return path
157
+ _sha = out["sha"]
158
+ if path not in self.dircache or sha not in [self.root, None]:
159
+ r = requests.get(
160
+ self.url.format(org=self.org, repo=self.repo, sha=_sha),
161
+ timeout=self.timeout,
162
+ **self.kw,
163
+ )
164
+ if r.status_code == 404:
165
+ raise FileNotFoundError(path)
166
+ r.raise_for_status()
167
+ types = {"blob": "file", "tree": "directory"}
168
+ out = [
169
+ {
170
+ "name": path + "/" + f["path"] if path else f["path"],
171
+ "mode": f["mode"],
172
+ "type": types[f["type"]],
173
+ "size": f.get("size", 0),
174
+ "sha": f["sha"],
175
+ }
176
+ for f in r.json()["tree"]
177
+ if f["type"] in types
178
+ ]
179
+ if sha in [self.root, None]:
180
+ self.dircache[path] = out
181
+ else:
182
+ out = self.dircache[path]
183
+ if detail:
184
+ return out
185
+ else:
186
+ return sorted([f["name"] for f in out])
187
+
188
+ def invalidate_cache(self, path=None):
189
+ self.dircache.clear()
190
+
191
+ @classmethod
192
+ def _strip_protocol(cls, path):
193
+ opts = infer_storage_options(path)
194
+ if "username" not in opts:
195
+ return super()._strip_protocol(path)
196
+ return opts["path"].lstrip("/")
197
+
198
+ @staticmethod
199
+ def _get_kwargs_from_urls(path):
200
+ opts = infer_storage_options(path)
201
+ if "username" not in opts:
202
+ return {}
203
+ out = {"org": opts["username"], "repo": opts["password"]}
204
+ if opts["host"]:
205
+ out["sha"] = opts["host"]
206
+ return out
207
+
208
+ def _open(
209
+ self,
210
+ path,
211
+ mode="rb",
212
+ block_size=None,
213
+ autocommit=True,
214
+ cache_options=None,
215
+ sha=None,
216
+ **kwargs,
217
+ ):
218
+ if mode != "rb":
219
+ raise NotImplementedError
220
+ url = self.rurl.format(
221
+ org=self.org, repo=self.repo, path=path, sha=sha or self.root
222
+ )
223
+ r = requests.get(url, timeout=self.timeout, **self.kw)
224
+ if r.status_code == 404:
225
+ raise FileNotFoundError(path)
226
+ r.raise_for_status()
227
+ return MemoryFile(None, None, r.content)
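A hedged example of reading from a public GitHub repository with the class above (illustrative only); the org, repo and file name are examples, and anonymous access is rate-limited by the GitHub API:

import fsspec

fs = fsspec.filesystem("github", org="fsspec", repo="filesystem_spec")

print(fs.ls("", detail=False)[:5])        # top level of the default branch
with fs.open("README.md", "rb") as f:     # whole file fetched from raw.githubusercontent.com
    first = f.read(200)

# Or pack org/repo into the URL, following the form given in the docstring:
with fsspec.open("github://fsspec:filesystem_spec@/README.md", "rb") as f:
    data = f.read()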
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/libarchive.py ADDED
@@ -0,0 +1,213 @@
1
+ from contextlib import contextmanager
2
+ from ctypes import (
3
+ CFUNCTYPE,
4
+ POINTER,
5
+ c_int,
6
+ c_longlong,
7
+ c_void_p,
8
+ cast,
9
+ create_string_buffer,
10
+ )
11
+
12
+ import libarchive
13
+ import libarchive.ffi as ffi
14
+
15
+ from fsspec import open_files
16
+ from fsspec.archive import AbstractArchiveFileSystem
17
+ from fsspec.implementations.memory import MemoryFile
18
+ from fsspec.utils import DEFAULT_BLOCK_SIZE
19
+
20
+ # Libarchive requires seekable files or memory only for certain archive
21
+ # types. However, since we read the directory first to cache the contents
22
+ # and also allow random access to any file, the file-like object needs
23
+ # to be seekable no matter what.
24
+
25
+ # Seek call-backs (not provided in the libarchive python wrapper)
26
+ SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int)
27
+ read_set_seek_callback = ffi.ffi(
28
+ "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int
29
+ )
30
+ new_api = hasattr(ffi, "NO_OPEN_CB")
31
+
32
+
33
+ @contextmanager
34
+ def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size):
35
+ """Read an archive from a seekable file-like object.
36
+
37
+ The `file` object must support the standard `readinto` and 'seek' methods.
38
+ """
39
+ buf = create_string_buffer(block_size)
40
+ buf_p = cast(buf, c_void_p)
41
+
42
+ def read_func(archive_p, context, ptrptr):
43
+ # readinto the buffer, returns number of bytes read
44
+ length = file.readinto(buf)
45
+ # write the address of the buffer into the pointer
46
+ ptrptr = cast(ptrptr, POINTER(c_void_p))
47
+ ptrptr[0] = buf_p
48
+ # tell libarchive how much data was written into the buffer
49
+ return length
50
+
51
+ def seek_func(archive_p, context, offset, whence):
52
+ file.seek(offset, whence)
53
+ # tell libarchive the current position
54
+ return file.tell()
55
+
56
+ read_cb = ffi.READ_CALLBACK(read_func)
57
+ seek_cb = SEEK_CALLBACK(seek_func)
58
+
59
+ if new_api:
60
+ open_cb = ffi.NO_OPEN_CB
61
+ close_cb = ffi.NO_CLOSE_CB
62
+ else:
63
+ open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB)
64
+ close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB)
65
+
66
+ with libarchive.read.new_archive_read(format_name, filter_name) as archive_p:
67
+ read_set_seek_callback(archive_p, seek_cb)
68
+ ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
69
+ yield libarchive.read.ArchiveRead(archive_p)
70
+
71
+
72
+ class LibArchiveFileSystem(AbstractArchiveFileSystem):
73
+ """Compressed archives as a file-system (read-only)
74
+
75
+ Supports the following formats:
76
+ tar, pax, cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar
77
+ Microsoft CAB, 7-Zip, WARC
78
+
79
+ See the libarchive documentation for further restrictions.
80
+ https://www.libarchive.org/
81
+
82
+ Keeps the file object open while the instance lives. It only works with seekable
83
+ file-like objects. In case the filesystem does not support this kind of
84
+ file object, it is recommended to cache locally.
85
+
86
+ This class is pickleable, but not necessarily thread-safe (depends on the
87
+ platform). See libarchive documentation for details.
88
+ """
89
+
90
+ root_marker = ""
91
+ protocol = "libarchive"
92
+ cachable = False
93
+
94
+ def __init__(
95
+ self,
96
+ fo="",
97
+ mode="r",
98
+ target_protocol=None,
99
+ target_options=None,
100
+ block_size=DEFAULT_BLOCK_SIZE,
101
+ **kwargs,
102
+ ):
103
+ """
104
+ Parameters
105
+ ----------
106
+ fo: str or file-like
107
+ Contains the archive, and must exist. If a str, will fetch file using
108
+ :meth:`~fsspec.open_files`, which must return one file exactly.
109
+ mode: str
110
+ Currently, only 'r' accepted
111
+ target_protocol: str (optional)
112
+ If ``fo`` is a string, this value can be used to override the
113
+ FS protocol inferred from a URL
114
+ target_options: dict (optional)
115
+ Kwargs passed when instantiating the target FS, if ``fo`` is
116
+ a string.
117
+ """
118
+ super().__init__(self, **kwargs)
119
+ if mode != "r":
120
+ raise ValueError("Only read from archive files accepted")
121
+ if isinstance(fo, str):
122
+ files = open_files(fo, protocol=target_protocol, **(target_options or {}))
123
+ if len(files) != 1:
124
+ raise ValueError(
125
+ f'Path "{fo}" did not resolve to exactly one file: "{files}"'
126
+ )
127
+ fo = files[0]
128
+ self.of = fo
129
+ self.fo = fo.__enter__() # the whole instance is a context
130
+ self.block_size = block_size
131
+ self.dir_cache = None
132
+
133
+ @contextmanager
134
+ def _open_archive(self):
135
+ self.fo.seek(0)
136
+ with custom_reader(self.fo, block_size=self.block_size) as arc:
137
+ yield arc
138
+
139
+ @classmethod
140
+ def _strip_protocol(cls, path):
141
+ # file paths are always relative to the archive root
142
+ return super()._strip_protocol(path).lstrip("/")
143
+
144
+ def _get_dirs(self):
145
+ fields = {
146
+ "name": "pathname",
147
+ "size": "size",
148
+ "created": "ctime",
149
+ "mode": "mode",
150
+ "uid": "uid",
151
+ "gid": "gid",
152
+ "mtime": "mtime",
153
+ }
154
+
155
+ if self.dir_cache is not None:
156
+ return
157
+
158
+ self.dir_cache = {}
159
+ list_names = []
160
+ with self._open_archive() as arc:
161
+ for entry in arc:
162
+ if not entry.isdir and not entry.isfile:
163
+ # Skip symbolic links, fifo entries, etc.
164
+ continue
165
+ self.dir_cache.update(
166
+ {
167
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
168
+ for dirname in self._all_dirnames(set(entry.name))
169
+ }
170
+ )
171
+ f = {key: getattr(entry, fields[key]) for key in fields}
172
+ f["type"] = "directory" if entry.isdir else "file"
173
+ list_names.append(entry.name)
174
+
175
+ self.dir_cache[f["name"]] = f
176
+ # libarchive does not seem to return an entry for the directories (at least
177
+ # not in all formats), so get the directory names from the file names
178
+ self.dir_cache.update(
179
+ {
180
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
181
+ for dirname in self._all_dirnames(list_names)
182
+ }
183
+ )
184
+
185
+ def _open(
186
+ self,
187
+ path,
188
+ mode="rb",
189
+ block_size=None,
190
+ autocommit=True,
191
+ cache_options=None,
192
+ **kwargs,
193
+ ):
194
+ path = self._strip_protocol(path)
195
+ if mode != "rb":
196
+ raise NotImplementedError
197
+
198
+ data = bytes()
199
+ with self._open_archive() as arc:
200
+ for entry in arc:
201
+ if entry.pathname != path:
202
+ continue
203
+
204
+ if entry.size == 0:
205
+ # empty file, so there are no blocks
206
+ break
207
+
208
+ for block in entry.get_blocks(entry.size):
209
+ data = block
210
+ break
211
+ else:
212
+ raise ValueError
213
+ return MemoryFile(fs=self, path=path, data=data)
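A minimal sketch of listing and reading archive members through the libarchive backend above (illustrative only); it requires the libarchive-c bindings, and the archive path and member name are placeholders:

import fsspec

fs = fsspec.filesystem("libarchive", fo="example-archive.7z")

print(fs.ls("", detail=False))              # listing built once by _get_dirs()
with fs.open("inner/file.txt", "rb") as f:  # the whole member is materialised in memory
    payload = f.read()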
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/local.py ADDED
@@ -0,0 +1,418 @@
1
+ import datetime
2
+ import io
3
+ import logging
4
+ import os
5
+ import os.path as osp
6
+ import re
7
+ import shutil
8
+ import stat
9
+ import tempfile
10
+
11
+ from fsspec import AbstractFileSystem
12
+ from fsspec.compression import compr
13
+ from fsspec.core import get_compression
14
+ from fsspec.utils import isfilelike, stringify_path
15
+
16
+ logger = logging.getLogger("fsspec.local")
17
+
18
+
19
+ class LocalFileSystem(AbstractFileSystem):
20
+ """Interface to files on local storage
21
+
22
+ Parameters
23
+ ----------
24
+ auto_mkdir: bool
25
+ Whether, when opening a file, the directory containing it should
26
+ be created (if it doesn't already exist). This is assumed by pyarrow
27
+ code.
28
+ """
29
+
30
+ root_marker = "/"
31
+ protocol = "file", "local"
32
+ local_file = True
33
+
34
+ def __init__(self, auto_mkdir=False, **kwargs):
35
+ super().__init__(**kwargs)
36
+ self.auto_mkdir = auto_mkdir
37
+
38
+ @property
39
+ def fsid(self):
40
+ return "local"
41
+
42
+ def mkdir(self, path, create_parents=True, **kwargs):
43
+ path = self._strip_protocol(path)
44
+ if self.exists(path):
45
+ raise FileExistsError(path)
46
+ if create_parents:
47
+ self.makedirs(path, exist_ok=True)
48
+ else:
49
+ os.mkdir(path, **kwargs)
50
+
51
+ def makedirs(self, path, exist_ok=False):
52
+ path = self._strip_protocol(path)
53
+ os.makedirs(path, exist_ok=exist_ok)
54
+
55
+ def rmdir(self, path):
56
+ path = self._strip_protocol(path)
57
+ os.rmdir(path)
58
+
59
+ def ls(self, path, detail=False, **kwargs):
60
+ path = self._strip_protocol(path)
61
+ info = self.info(path)
62
+ if info["type"] == "directory":
63
+ with os.scandir(path) as it:
64
+ infos = [self.info(f) for f in it]
65
+ else:
66
+ infos = [info]
67
+
68
+ if not detail:
69
+ return [i["name"] for i in infos]
70
+ return infos
71
+
72
+ def info(self, path, **kwargs):
73
+ if isinstance(path, os.DirEntry):
74
+ # scandir DirEntry
75
+ out = path.stat(follow_symlinks=False)
76
+ link = path.is_symlink()
77
+ if path.is_dir(follow_symlinks=False):
78
+ t = "directory"
79
+ elif path.is_file(follow_symlinks=False):
80
+ t = "file"
81
+ else:
82
+ t = "other"
83
+ path = self._strip_protocol(path.path)
84
+ else:
85
+ # str or path-like
86
+ path = self._strip_protocol(path)
87
+ out = os.stat(path, follow_symlinks=False)
88
+ link = stat.S_ISLNK(out.st_mode)
89
+ if link:
90
+ out = os.stat(path, follow_symlinks=True)
91
+ if stat.S_ISDIR(out.st_mode):
92
+ t = "directory"
93
+ elif stat.S_ISREG(out.st_mode):
94
+ t = "file"
95
+ else:
96
+ t = "other"
97
+ result = {
98
+ "name": path,
99
+ "size": out.st_size,
100
+ "type": t,
101
+ "created": out.st_ctime,
102
+ "islink": link,
103
+ }
104
+ for field in ["mode", "uid", "gid", "mtime", "ino", "nlink"]:
105
+ result[field] = getattr(out, f"st_{field}")
106
+ if result["islink"]:
107
+ result["destination"] = os.readlink(path)
108
+ try:
109
+ out2 = os.stat(path, follow_symlinks=True)
110
+ result["size"] = out2.st_size
111
+ except OSError:
112
+ result["size"] = 0
113
+ return result
114
+
115
+ def lexists(self, path, **kwargs):
116
+ return osp.lexists(path)
117
+
118
+ def cp_file(self, path1, path2, **kwargs):
119
+ path1 = self._strip_protocol(path1).rstrip("/")
120
+ path2 = self._strip_protocol(path2).rstrip("/")
121
+ if self.auto_mkdir:
122
+ self.makedirs(self._parent(path2), exist_ok=True)
123
+ if self.isfile(path1):
124
+ shutil.copyfile(path1, path2)
125
+ elif self.isdir(path1):
126
+ self.mkdirs(path2, exist_ok=True)
127
+ else:
128
+ raise FileNotFoundError(path1)
129
+
130
+ def get_file(self, path1, path2, callback=None, **kwargs):
131
+ if isfilelike(path2):
132
+ with open(path1, "rb") as f:
133
+ shutil.copyfileobj(f, path2)
134
+ else:
135
+ return self.cp_file(path1, path2, **kwargs)
136
+
137
+ def put_file(self, path1, path2, callback=None, **kwargs):
138
+ return self.cp_file(path1, path2, **kwargs)
139
+
140
+ def mv_file(self, path1, path2, **kwargs):
141
+ path1 = self._strip_protocol(path1).rstrip("/")
142
+ path2 = self._strip_protocol(path2).rstrip("/")
143
+ shutil.move(path1, path2)
144
+
145
+ def link(self, src, dst, **kwargs):
146
+ src = self._strip_protocol(src)
147
+ dst = self._strip_protocol(dst)
148
+ os.link(src, dst, **kwargs)
149
+
150
+ def symlink(self, src, dst, **kwargs):
151
+ src = self._strip_protocol(src)
152
+ dst = self._strip_protocol(dst)
153
+ os.symlink(src, dst, **kwargs)
154
+
155
+ def islink(self, path) -> bool:
156
+ return os.path.islink(self._strip_protocol(path))
157
+
158
+ def rm_file(self, path):
159
+ os.remove(self._strip_protocol(path))
160
+
161
+ def rm(self, path, recursive=False, maxdepth=None):
162
+ if not isinstance(path, list):
163
+ path = [path]
164
+
165
+ for p in path:
166
+ p = self._strip_protocol(p).rstrip("/")
167
+ if self.isdir(p):
168
+ if not recursive:
169
+ raise ValueError("Cannot delete directory, set recursive=True")
170
+ if osp.abspath(p) == os.getcwd():
171
+ raise ValueError("Cannot delete current working directory")
172
+ shutil.rmtree(p)
173
+ else:
174
+ os.remove(p)
175
+
176
+ def unstrip_protocol(self, name):
177
+ name = self._strip_protocol(name) # normalise for local/win/...
178
+ return f"file://{name}"
179
+
180
+ def _open(self, path, mode="rb", block_size=None, **kwargs):
181
+ path = self._strip_protocol(path)
182
+ if self.auto_mkdir and "w" in mode:
183
+ self.makedirs(self._parent(path), exist_ok=True)
184
+ return LocalFileOpener(path, mode, fs=self, **kwargs)
185
+
186
+ def touch(self, path, truncate=True, **kwargs):
187
+ path = self._strip_protocol(path)
188
+ if self.auto_mkdir:
189
+ self.makedirs(self._parent(path), exist_ok=True)
190
+ if self.exists(path):
191
+ os.utime(path, None)
192
+ else:
193
+ open(path, "a").close()
194
+ if truncate:
195
+ os.truncate(path, 0)
196
+
197
+ def created(self, path):
198
+ info = self.info(path=path)
199
+ return datetime.datetime.fromtimestamp(
200
+ info["created"], tz=datetime.timezone.utc
201
+ )
202
+
203
+ def modified(self, path):
204
+ info = self.info(path=path)
205
+ return datetime.datetime.fromtimestamp(info["mtime"], tz=datetime.timezone.utc)
206
+
207
+ @classmethod
208
+ def _parent(cls, path):
209
+ path = cls._strip_protocol(path).rstrip("/")
210
+ if "/" in path:
211
+ return path.rsplit("/", 1)[0]
212
+ else:
213
+ return cls.root_marker
214
+
215
+ @classmethod
216
+ def _strip_protocol(cls, path):
217
+ path = stringify_path(path)
218
+ if path.startswith("file://"):
219
+ path = path[7:]
220
+ elif path.startswith("file:"):
221
+ path = path[5:]
222
+ elif path.startswith("local://"):
223
+ path = path[8:]
224
+ elif path.startswith("local:"):
225
+ path = path[6:]
226
+ return make_path_posix(path).rstrip("/") or cls.root_marker
227
+
228
+ def _isfilestore(self):
229
+ # Inheriting from DaskFileSystem makes this False (S3, etc. were
230
+ # the original motivation). But we are a posix-like file system.
231
+ # See https://github.com/dask/dask/issues/5526
232
+ return True
233
+
234
+ def chmod(self, path, mode):
235
+ path = stringify_path(path)
236
+ return os.chmod(path, mode)
237
+
238
+
239
+ def make_path_posix(path, sep=os.sep):
240
+ """Make path generic"""
241
+ if isinstance(path, (list, set, tuple)):
242
+ return type(path)(make_path_posix(p) for p in path)
243
+ if "~" in path:
244
+ path = osp.expanduser(path)
245
+ if sep == "/":
246
+ # most common fast case for posix
247
+ if path.startswith("/"):
248
+ return path
249
+ if path.startswith("./"):
250
+ path = path[2:]
251
+ return f"{os.getcwd()}/{path}"
252
+ if (
253
+ (sep not in path and "/" not in path)
254
+ or (sep == "/" and not path.startswith("/"))
255
+ or (sep == "\\" and ":" not in path and not path.startswith("\\\\"))
256
+ ):
257
+ # relative path like "path" or "rel\\path" (win) or "rel/path"
258
+ if os.sep == "\\":
259
+ # abspath made some more '\\' separators
260
+ return make_path_posix(osp.abspath(path))
261
+ else:
262
+ return f"{os.getcwd()}/{path}"
263
+ if path.startswith("file://"):
264
+ path = path[7:]
265
+ if re.match("/[A-Za-z]:", path):
266
+ # for windows file URI like "file:///C:/folder/file"
267
+ # or "file:///C:\\dir\\file"
268
+ path = path[1:].replace("\\", "/").replace("//", "/")
269
+ if path.startswith("\\\\"):
270
+ # special case for windows UNC/DFS-style paths, do nothing,
271
+ # just flip the slashes around (case below does not work!)
272
+ return path.replace("\\", "/")
273
+ if re.match("[A-Za-z]:", path):
274
+ # windows full path like "C:\\local\\path"
275
+ return path.lstrip("\\").replace("\\", "/").replace("//", "/")
276
+ if path.startswith("\\"):
277
+ # windows network path like "\\server\\path"
278
+ return "/" + path.lstrip("\\").replace("\\", "/").replace("//", "/")
279
+ return path
280
+
281
+
282
+ def trailing_sep(path):
283
+ """Return True if the path ends with a path separator.
284
+
285
+ A forward slash is always considered a path separator, even on Operating
286
+ Systems that normally use a backslash.
287
+ """
288
+ # TODO: if all incoming paths were posix-compliant then separator would
289
+ # always be a forward slash, simplifying this function.
290
+ # See https://github.com/fsspec/filesystem_spec/pull/1250
291
+ return path.endswith(os.sep) or (os.altsep is not None and path.endswith(os.altsep))
292
+
293
+
294
+ class LocalFileOpener(io.IOBase):
295
+ def __init__(
296
+ self, path, mode, autocommit=True, fs=None, compression=None, **kwargs
297
+ ):
298
+ logger.debug("open file: %s", path)
299
+ self.path = path
300
+ self.mode = mode
301
+ self.fs = fs
302
+ self.f = None
303
+ self.autocommit = autocommit
304
+ self.compression = get_compression(path, compression)
305
+ self.blocksize = io.DEFAULT_BUFFER_SIZE
306
+ self._open()
307
+
308
+ def _open(self):
309
+ if self.f is None or self.f.closed:
310
+ if self.autocommit or "w" not in self.mode:
311
+ self.f = open(self.path, mode=self.mode)
312
+ if self.compression:
313
+ compress = compr[self.compression]
314
+ self.f = compress(self.f, mode=self.mode)
315
+ else:
316
+ # TODO: check if path is writable?
317
+ i, name = tempfile.mkstemp()
318
+ os.close(i) # we want normal open and normal buffered file
319
+ self.temp = name
320
+ self.f = open(name, mode=self.mode)
321
+ if "w" not in self.mode:
322
+ self.size = self.f.seek(0, 2)
323
+ self.f.seek(0)
324
+ self.f.size = self.size
325
+
326
+ def _fetch_range(self, start, end):
327
+ # probably only used by cached FS
328
+ if "r" not in self.mode:
329
+ raise ValueError
330
+ self._open()
331
+ self.f.seek(start)
332
+ return self.f.read(end - start)
333
+
334
+ def __setstate__(self, state):
335
+ self.f = None
336
+ loc = state.pop("loc", None)
337
+ self.__dict__.update(state)
338
+ if "r" in state["mode"]:
339
+ self.f = None
340
+ self._open()
341
+ self.f.seek(loc)
342
+
343
+ def __getstate__(self):
344
+ d = self.__dict__.copy()
345
+ d.pop("f")
346
+ if "r" in self.mode:
347
+ d["loc"] = self.f.tell()
348
+ else:
349
+ if not self.f.closed:
350
+ raise ValueError("Cannot serialise open write-mode local file")
351
+ return d
352
+
353
+ def commit(self):
354
+ if self.autocommit:
355
+ raise RuntimeError("Can only commit if not already set to autocommit")
356
+ shutil.move(self.temp, self.path)
357
+
358
+ def discard(self):
359
+ if self.autocommit:
360
+ raise RuntimeError("Cannot discard if set to autocommit")
361
+ os.remove(self.temp)
362
+
363
+ def readable(self) -> bool:
364
+ return True
365
+
366
+ def writable(self) -> bool:
367
+ return "r" not in self.mode
368
+
369
+ def read(self, *args, **kwargs):
370
+ return self.f.read(*args, **kwargs)
371
+
372
+ def write(self, *args, **kwargs):
373
+ return self.f.write(*args, **kwargs)
374
+
375
+ def tell(self, *args, **kwargs):
376
+ return self.f.tell(*args, **kwargs)
377
+
378
+ def seek(self, *args, **kwargs):
379
+ return self.f.seek(*args, **kwargs)
380
+
381
+ def seekable(self, *args, **kwargs):
382
+ return self.f.seekable(*args, **kwargs)
383
+
384
+ def readline(self, *args, **kwargs):
385
+ return self.f.readline(*args, **kwargs)
386
+
387
+ def readlines(self, *args, **kwargs):
388
+ return self.f.readlines(*args, **kwargs)
389
+
390
+ def close(self):
391
+ return self.f.close()
392
+
393
+ def truncate(self, size=None) -> int:
394
+ return self.f.truncate(size)
395
+
396
+ @property
397
+ def closed(self):
398
+ return self.f.closed
399
+
400
+ def fileno(self):
401
+ return self.raw.fileno()
402
+
403
+ def flush(self) -> None:
404
+ self.f.flush()
405
+
406
+ def __iter__(self):
407
+ return self.f.__iter__()
408
+
409
+ def __getattr__(self, item):
410
+ return getattr(self.f, item)
411
+
412
+ def __enter__(self):
413
+ self._incontext = True
414
+ return self
415
+
416
+ def __exit__(self, exc_type, exc_value, traceback):
417
+ self._incontext = False
418
+ self.f.__exit__(exc_type, exc_value, traceback)
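A brief sketch exercising LocalFileSystem above (illustrative only); the paths live under a temporary directory so the example stays self-contained:

import os
import tempfile

import fsspec

fs = fsspec.filesystem("file", auto_mkdir=True)
base = tempfile.mkdtemp()
target = os.path.join(base, "sub", "hello.bin")

with fs.open(target, "wb") as f:     # parent directory created because of auto_mkdir
    f.write(b"hello")

print(fs.info(target)["size"])       # -> 5
print(fs.ls(base, detail=False))     # -> ['<base>/sub']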
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/reference.py ADDED
@@ -0,0 +1,1160 @@
1
+ import base64
2
+ import collections
3
+ import io
4
+ import itertools
5
+ import logging
6
+ import math
7
+ import os
8
+ from functools import lru_cache
9
+ from typing import TYPE_CHECKING
10
+
11
+ import fsspec.core
12
+
13
+ try:
14
+ import ujson as json
15
+ except ImportError:
16
+ if not TYPE_CHECKING:
17
+ import json
18
+
19
+ from ..asyn import AsyncFileSystem
20
+ from ..callbacks import DEFAULT_CALLBACK
21
+ from ..core import filesystem, open, split_protocol
22
+ from ..utils import isfilelike, merge_offset_ranges, other_paths
23
+
24
+ logger = logging.getLogger("fsspec.reference")
25
+
26
+
27
+ class ReferenceNotReachable(RuntimeError):
28
+ def __init__(self, reference, target, *args):
29
+ super().__init__(*args)
30
+ self.reference = reference
31
+ self.target = target
32
+
33
+ def __str__(self):
34
+ return f'Reference "{self.reference}" failed to fetch target {self.target}'
35
+
36
+
37
+ def _first(d):
38
+ return list(d.values())[0]
39
+
40
+
41
+ def _prot_in_references(path, references):
42
+ ref = references.get(path)
43
+ if isinstance(ref, (list, tuple)):
44
+ return split_protocol(ref[0])[0] if ref[0] else ref[0]
45
+
46
+
47
+ def _protocol_groups(paths, references):
48
+ if isinstance(paths, str):
49
+ return {_prot_in_references(paths, references): [paths]}
50
+ out = {}
51
+ for path in paths:
52
+ protocol = _prot_in_references(path, references)
53
+ out.setdefault(protocol, []).append(path)
54
+ return out
55
+
56
+
57
+ class RefsValuesView(collections.abc.ValuesView):
58
+ def __iter__(self):
59
+ for val in self._mapping.zmetadata.values():
60
+ yield json.dumps(val).encode()
61
+ yield from self._mapping._items.values()
62
+ for field in self._mapping.listdir():
63
+ chunk_sizes = self._mapping._get_chunk_sizes(field)
64
+ if len(chunk_sizes) == 0:
65
+ yield self._mapping[field + "/0"]
66
+ continue
67
+ yield from self._mapping._generate_all_records(field)
68
+
69
+
70
+ class RefsItemsView(collections.abc.ItemsView):
71
+ def __iter__(self):
72
+ return zip(self._mapping.keys(), self._mapping.values())
73
+
74
+
75
+ def ravel_multi_index(idx, sizes):
76
+ val = 0
77
+ mult = 1
78
+ for i, s in zip(idx[::-1], sizes[::-1]):
79
+ val += i * mult
80
+ mult *= s
81
+ return val
82
+
83
+
84
+ class LazyReferenceMapper(collections.abc.MutableMapping):
85
+ """This interface can be used to read/write references from Parquet stores.
86
+ It is not intended for other types of references.
87
+ It can be used with Kerchunk's MultiZarrToZarr method to combine
88
+ references into a parquet store.
89
+ Examples of this use-case can be found here:
90
+ https://fsspec.github.io/kerchunk/advanced.html?highlight=parquet#parquet-storage"""
91
+
92
+ # import is class level to prevent numpy dep requirement for fsspec
93
+ @property
94
+ def np(self):
95
+ import numpy as np
96
+
97
+ return np
98
+
99
+ @property
100
+ def pd(self):
101
+ import pandas as pd
102
+
103
+ return pd
104
+
105
+ def __init__(
106
+ self, root, fs=None, out_root=None, cache_size=128, categorical_threshold=10
107
+ ):
108
+ """
109
+
110
+ This instance will be writable, storing changes in memory until full partitions
111
+ are accumulated or .flush() is called.
112
+
113
+ To create an empty lazy store, use .create()
114
+
115
+ Parameters
116
+ ----------
117
+ root : str
118
+ Root of parquet store
119
+ fs : fsspec.AbstractFileSystem
120
+ fsspec filesystem object, default is local filesystem.
121
+ cache_size : int, default=128
122
+ Maximum size of LRU cache, where cache_size*record_size denotes
123
+ the total number of references that can be loaded in memory at once.
124
+ categorical_threshold : int
125
+ Encode urls as pandas.Categorical to reduce memory footprint if the ratio
126
+ of the total number of refs to the number of unique urls for each variable
127
+ is greater than or equal to this number. (default 10)
128
+ """
129
+ self.root = root
130
+ self.chunk_sizes = {}
131
+ self.out_root = out_root or self.root
132
+ self.cat_thresh = categorical_threshold
133
+ self.cache_size = cache_size
134
+ self.dirs = None
135
+ self.url = self.root + "/{field}/refs.{record}.parq"
136
+ # TODO: derive fs from `root`
137
+ self.fs = fsspec.filesystem("file") if fs is None else fs
138
+
139
+ def __getattr__(self, item):
140
+ if item in ("_items", "record_size", "zmetadata"):
141
+ self.setup()
142
+ # avoid possible recursion if setup fails somehow
143
+ return self.__dict__[item]
144
+ raise AttributeError(item)
145
+
146
+ def setup(self):
147
+ self._items = {}
148
+ self._items[".zmetadata"] = self.fs.cat_file(
149
+ "/".join([self.root, ".zmetadata"])
150
+ )
151
+ met = json.loads(self._items[".zmetadata"])
152
+ self.record_size = met["record_size"]
153
+ self.zmetadata = met["metadata"]
154
+
155
+ # Define function to open and decompress refs
156
+ @lru_cache(maxsize=self.cache_size)
157
+ def open_refs(field, record):
158
+ """cached parquet file loader"""
159
+ path = self.url.format(field=field, record=record)
160
+ data = io.BytesIO(self.fs.cat_file(path))
161
+ df = self.pd.read_parquet(data, engine="fastparquet")
162
+ refs = {c: df[c].values for c in df.columns}
163
+ return refs
164
+
165
+ self.open_refs = open_refs
166
+
167
+ @staticmethod
168
+ def create(root, storage_options=None, fs=None, record_size=10000, **kwargs):
169
+ """Make empty parquet reference set
170
+
171
+ First deletes the contents of the given directory, if it exists.
172
+
173
+ Parameters
174
+ ----------
175
+ root: str
176
+ Directory to contain the output; will be created
177
+ storage_options: dict | None
178
+ For making the filesystem to use for writing, if fs is None
179
+ fs: FileSystem | None
180
+ Filesystem for writing
181
+ record_size: int
182
+ Number of references per parquet file
183
+ kwargs: passed to __init__
184
+
185
+ Returns
186
+ -------
187
+ LazyReferenceMapper instance
188
+ """
189
+ met = {"metadata": {}, "record_size": record_size}
190
+ if fs is None:
191
+ fs, root = fsspec.core.url_to_fs(root, **(storage_options or {}))
192
+ if fs.exists(root):
193
+ fs.rm(root, recursive=True)
194
+ fs.makedirs(root, exist_ok=True)
195
+ fs.pipe("/".join([root, ".zmetadata"]), json.dumps(met).encode())
196
+ return LazyReferenceMapper(root, fs, **kwargs)
197
+
198
+ def listdir(self, basename=True):
199
+ """List top-level directories"""
200
+ # cache me?
201
+ if self.dirs is None:
202
+ dirs = [p.split("/", 1)[0] for p in self.zmetadata]
203
+ self.dirs = {p for p in dirs if p and not p.startswith(".")}
204
+ listing = self.dirs
205
+ if basename:
206
+ listing = [os.path.basename(path) for path in listing]
207
+ return listing
208
+
209
+ def ls(self, path="", detail=True):
210
+ """Shortcut file listings"""
211
+ if not path:
212
+ dirnames = self.listdir()
213
+ others = set(
214
+ [".zmetadata"]
215
+ + [name for name in self.zmetadata if "/" not in name]
216
+ + [name for name in self._items if "/" not in name]
217
+ )
218
+ if detail is False:
219
+ others.update(dirnames)
220
+ return sorted(others)
221
+ dirinfo = [
222
+ {"name": name, "type": "directory", "size": 0} for name in dirnames
223
+ ]
224
+ fileinfo = [
225
+ {
226
+ "name": name,
227
+ "type": "file",
228
+ "size": len(
229
+ json.dumps(self.zmetadata[name])
230
+ if name in self.zmetadata
231
+ else self._items[name]
232
+ ),
233
+ }
234
+ for name in others
235
+ ]
236
+ return sorted(dirinfo + fileinfo, key=lambda s: s["name"])
237
+ parts = path.split("/", 1)
238
+ if len(parts) > 1:
239
+ raise FileNotFoundError("Cannot list within directories right now")
240
+ field = parts[0]
241
+ others = set(
242
+ [name for name in self.zmetadata if name.startswith(f"{path}/")]
243
+ + [name for name in self._items if name.startswith(f"{path}/")]
244
+ )
245
+ fileinfo = [
246
+ {
247
+ "name": name,
248
+ "type": "file",
249
+ "size": len(
250
+ json.dumps(self.zmetadata[name])
251
+ if name in self.zmetadata
252
+ else self._items[name]
253
+ ),
254
+ }
255
+ for name in others
256
+ ]
257
+ keys = self._keys_in_field(field)
258
+
259
+ if detail is False:
260
+ return list(others) + list(keys)
261
+ recs = self._generate_all_records(field)
262
+ recinfo = [
263
+ {"name": name, "type": "file", "size": rec[-1]}
264
+ for name, rec in zip(keys, recs)
265
+ if rec[0] # filters out path==None, deleted/missing
266
+ ]
267
+ return fileinfo + recinfo
268
+
269
+ def _load_one_key(self, key):
270
+ """Get the reference for one key
271
+
272
+ Returns bytes, one-element list or three-element list.
273
+ """
274
+ if key in self._items:
275
+ return self._items[key]
276
+ elif key in self.zmetadata:
277
+ return json.dumps(self.zmetadata[key]).encode()
278
+ elif "/" not in key or self._is_meta(key):
279
+ raise KeyError(key)
280
+ field, sub_key = key.split("/")
281
+ record, ri, chunk_size = self._key_to_record(key)
282
+ maybe = self._items.get((field, record), {}).get(ri, False)
283
+ if maybe is None:
284
+ # explicitly deleted
285
+ raise KeyError
286
+ elif maybe:
287
+ return maybe
288
+ elif chunk_size == 0:
289
+ return b""
290
+
291
+ # Chunk keys can be loaded from row group and cached in LRU cache
292
+ try:
293
+ refs = self.open_refs(field, record)
294
+ except (ValueError, TypeError, FileNotFoundError):
295
+ raise KeyError(key)
296
+ columns = ["path", "offset", "size", "raw"]
297
+ selection = [refs[c][ri] if c in refs else None for c in columns]
298
+ raw = selection[-1]
299
+ if raw is not None:
300
+ return raw
301
+ if selection[0] is None:
302
+ raise KeyError("This reference does not exist or has been deleted")
303
+ if selection[1:3] == [0, 0]:
304
+ # URL only
305
+ return selection[:1]
306
+ # URL, offset, size
307
+ return selection[:3]
308
+
309
+ @lru_cache(4096)
310
+ def _key_to_record(self, key):
311
+ """Details needed to construct a reference for one key"""
312
+ field, chunk = key.split("/")
313
+ chunk_sizes = self._get_chunk_sizes(field)
314
+ if len(chunk_sizes) == 0:
315
+ return 0, 0, 0
316
+ chunk_idx = [int(c) for c in chunk.split(".")]
317
+ chunk_number = ravel_multi_index(chunk_idx, chunk_sizes)
318
+ record = chunk_number // self.record_size
319
+ ri = chunk_number % self.record_size
320
+ return record, ri, len(chunk_sizes)
321
+
322
+ def _get_chunk_sizes(self, field):
323
+ """The number of chunks along each axis for a given field"""
324
+ if field not in self.chunk_sizes:
325
+ zarray = self.zmetadata[f"{field}/.zarray"]
326
+ size_ratio = [
327
+ math.ceil(s / c) for s, c in zip(zarray["shape"], zarray["chunks"])
328
+ ]
329
+ self.chunk_sizes[field] = size_ratio or [1]
330
+ return self.chunk_sizes[field]
331
+
332
+ def _generate_record(self, field, record):
333
+ """The references for a given parquet file of a given field"""
334
+ refs = self.open_refs(field, record)
335
+ it = iter(zip(*refs.values()))
336
+ if len(refs) == 3:
337
+ # All urls
338
+ return (list(t) for t in it)
339
+ elif len(refs) == 1:
340
+ # All raws
341
+ return refs["raw"]
342
+ else:
343
+ # Mix of urls and raws
344
+ return (list(t[:3]) if not t[3] else t[3] for t in it)
345
+
346
+ def _generate_all_records(self, field):
347
+ """Load all the references within a field by iterating over the parquet files"""
348
+ nrec = 1
349
+ for ch in self._get_chunk_sizes(field):
350
+ nrec *= ch
351
+ nrec = math.ceil(nrec / self.record_size)
352
+ for record in range(nrec):
353
+ yield from self._generate_record(field, record)
354
+
355
+ def values(self):
356
+ return RefsValuesView(self)
357
+
358
+ def items(self):
359
+ return RefsItemsView(self)
360
+
361
+ def __hash__(self):
362
+ return id(self)
363
+
364
+ def __getitem__(self, key):
365
+ return self._load_one_key(key)
366
+
367
+ def __setitem__(self, key, value):
368
+ if "/" in key and not self._is_meta(key):
369
+ field, chunk = key.split("/")
370
+ record, i, _ = self._key_to_record(key)
371
+ subdict = self._items.setdefault((field, record), {})
372
+ subdict[i] = value
373
+ if len(subdict) == self.record_size:
374
+ self.write(field, record)
375
+ else:
376
+ # metadata or top-level
377
+ self._items[key] = value
378
+ new_value = json.loads(
379
+ value.decode() if isinstance(value, bytes) else value
380
+ )
381
+ self.zmetadata[key] = {**self.zmetadata.get(key, {}), **new_value}
382
+
383
+ @staticmethod
384
+ def _is_meta(key):
385
+ return key.startswith(".z") or "/.z" in key
386
+
387
+ def __delitem__(self, key):
388
+ if key in self._items:
389
+ del self._items[key]
390
+ elif key in self.zmetadata:
391
+ del self.zmetadata[key]
392
+ else:
393
+ if "/" in key and not self._is_meta(key):
394
+ field, chunk = key.split("/")
395
+ record, i, _ = self._key_to_record(key)
396
+ subdict = self._items.setdefault((field, record), {})
397
+ subdict[i] = None
398
+ if len(subdict) == self.record_size:
399
+ self.write(field, record)
400
+ else:
401
+ # metadata or top-level
402
+ self._items[key] = None
403
+
404
+ def write(self, field, record, base_url=None, storage_options=None):
405
+ # extra requirements if writing
406
+ import kerchunk.df
407
+ import numpy as np
408
+ import pandas as pd
409
+
410
+ partition = self._items[(field, record)]
411
+ original = False
412
+ if len(partition) < self.record_size:
413
+ try:
414
+ original = self.open_refs(field, record)
415
+ except IOError:
416
+ pass
417
+
418
+ if original:
419
+ paths = original["path"]
420
+ offsets = original["offset"]
421
+ sizes = original["size"]
422
+ raws = original["raw"]
423
+ else:
424
+ paths = np.full(self.record_size, np.nan, dtype="O")
425
+ offsets = np.zeros(self.record_size, dtype="int64")
426
+ sizes = np.zeros(self.record_size, dtype="int64")
427
+ raws = np.full(self.record_size, np.nan, dtype="O")
428
+ for j, data in partition.items():
429
+ if isinstance(data, list):
430
+ if (
431
+ str(paths.dtype) == "category"
432
+ and data[0] not in paths.dtype.categories
433
+ ):
434
+ paths = paths.add_categories(data[0])
435
+ paths[j] = data[0]
436
+ if len(data) > 1:
437
+ offsets[j] = data[1]
438
+ sizes[j] = data[2]
439
+ elif data is None:
440
+ # delete
441
+ paths[j] = None
442
+ offsets[j] = 0
443
+ sizes[j] = 0
444
+ raws[j] = None
445
+ else:
446
+ # this is the only call into kerchunk, could remove
447
+ raws[j] = kerchunk.df._proc_raw(data)
448
+ # TODO: only save needed columns
449
+ df = pd.DataFrame(
450
+ {
451
+ "path": paths,
452
+ "offset": offsets,
453
+ "size": sizes,
454
+ "raw": raws,
455
+ },
456
+ copy=False,
457
+ )
458
+ if df.path.count() / (df.path.nunique() or 1) > self.cat_thresh:
459
+ df["path"] = df["path"].astype("category")
460
+ object_encoding = {"raw": "bytes", "path": "utf8"}
461
+ has_nulls = ["path", "raw"]
462
+
463
+ fn = f"{base_url or self.out_root}/{field}/refs.{record}.parq"
464
+ self.fs.mkdirs(f"{base_url or self.out_root}/{field}", exist_ok=True)
465
+ df.to_parquet(
466
+ fn,
467
+ engine="fastparquet",
468
+ storage_options=storage_options
469
+ or getattr(self.fs, "storage_options", None),
470
+ compression="zstd",
471
+ index=False,
472
+ stats=False,
473
+ object_encoding=object_encoding,
474
+ has_nulls=has_nulls,
475
+ # **kwargs,
476
+ )
477
+ partition.clear()
478
+ self._items.pop((field, record))
479
+
480
+ def flush(self, base_url=None, storage_options=None):
481
+ """Output any modified or deleted keys
482
+
483
+ Parameters
484
+ ----------
485
+ base_url: str
486
+ Location of the output
487
+ """
488
+ # write what we have so far and clear sub chunks
489
+ for thing in list(self._items):
490
+ if isinstance(thing, tuple):
491
+ field, record = thing
492
+ self.write(
493
+ field,
494
+ record,
495
+ base_url=base_url,
496
+ storage_options=storage_options,
497
+ )
498
+
499
+ # gather .zmetadata from self._items and write that too
500
+ for k in list(self._items):
501
+ if k != ".zmetadata" and ".z" in k:
502
+ self.zmetadata[k] = json.loads(self._items.pop(k))
503
+ met = {"metadata": self.zmetadata, "record_size": self.record_size}
504
+ self._items[".zmetadata"] = json.dumps(met).encode()
505
+ self.fs.pipe(
506
+ "/".join([base_url or self.out_root, ".zmetadata"]),
507
+ self._items[".zmetadata"],
508
+ )
509
+
510
+ # TODO: only clear those that we wrote to?
511
+ self.open_refs.cache_clear()
512
+
513
+ def __len__(self):
514
+ # Caveat: This counts expected references, not actual - but is fast
515
+ count = 0
516
+ for field in self.listdir():
517
+ if field.startswith("."):
518
+ count += 1
519
+ else:
520
+ count += math.prod(self._get_chunk_sizes(field))
521
+ count += len(self.zmetadata) # all metadata keys
522
+ # any other files not in reference partitions
523
+ count += sum(1 for _ in self._items if not isinstance(_, tuple))
524
+ return count
525
+
526
+ def __iter__(self):
527
+ # Caveat: returns only existing keys, so the number of these does not
528
+ # match len(self)
529
+ metas = set(self.zmetadata)
530
+ metas.update(self._items)
531
+ for bit in metas:
532
+ if isinstance(bit, str):
533
+ yield bit
534
+ for field in self.listdir():
535
+ for k in self._keys_in_field(field):
536
+ if k in self:
537
+ yield k
538
+
539
+ def __contains__(self, item):
540
+ try:
541
+ self._load_one_key(item)
542
+ return True
543
+ except KeyError:
544
+ return False
545
+
546
+ def _keys_in_field(self, field):
547
+ """List key names in given field
548
+
549
+ Produces strings like "field/x.y" appropriate from the chunking of the array
550
+ """
551
+ chunk_sizes = self._get_chunk_sizes(field)
552
+ if len(chunk_sizes) == 0:
553
+ yield field + "/0"
554
+ return
555
+ inds = itertools.product(*(range(i) for i in chunk_sizes))
556
+ for ind in inds:
557
+ yield field + "/" + ".".join([str(c) for c in ind])
558
+
559
+
560
+ class ReferenceFileSystem(AsyncFileSystem):
561
+ """View byte ranges of some other file as a file system
562
+ Initial version: single file system target, which must support
563
+ async, and must allow start and end args in _cat_file. Later versions
564
+ may allow multiple arbitrary URLs for the targets.
565
+ This FileSystem is read-only. It is designed to be used with async
566
+ targets (for now). This FileSystem only allows whole-file access, no
567
+ ``open``. We do not get original file details from the target FS.
568
+ Configuration is by passing a dict of references at init, or a URL to
569
+ a JSON file containing the same; this dict
570
+ can also contain concrete data for some set of paths.
571
+ Reference dict format:
572
+ {path0: bytes_data, path1: (target_url, offset, size)}
573
+ https://github.com/fsspec/kerchunk/blob/main/README.md
574
+ """
575
+
576
+ protocol = "reference"
577
+
578
+ def __init__(
579
+ self,
580
+ fo,
581
+ target=None,
582
+ ref_storage_args=None,
583
+ target_protocol=None,
584
+ target_options=None,
585
+ remote_protocol=None,
586
+ remote_options=None,
587
+ fs=None,
588
+ template_overrides=None,
589
+ simple_templates=True,
590
+ max_gap=64_000,
591
+ max_block=256_000_000,
592
+ cache_size=128,
593
+ **kwargs,
594
+ ):
595
+ """
596
+ Parameters
597
+ ----------
598
+ fo : dict or str
599
+ The set of references to use for this instance, with a structure as above.
600
+ If str referencing a JSON file, will use fsspec.open, in conjunction
601
+ with target_options and target_protocol to open and parse JSON at this
602
+ location. If a directory, then assume references are a set of parquet
603
+ files to be loaded lazily.
604
+ target : str
605
+ For any references having target_url as None, this is the default file
606
+ target to use
607
+ ref_storage_args : dict
608
+ If references is a str, use these kwargs for loading the JSON file.
609
+ Deprecated: use target_options instead.
610
+ target_protocol : str
611
+ Used for loading the reference file, if it is a path. If None, protocol
612
+ will be derived from the given path
613
+ target_options : dict
614
+ Extra FS options for loading the reference file ``fo``, if given as a path
615
+ remote_protocol : str
616
+ The protocol of the filesystem on which the references will be evaluated
617
+ (unless fs is provided). If not given, will be derived from the first
618
+ URL that has a protocol in the templates or in the references, in that
619
+ order.
620
+ remote_options : dict
621
+ kwargs to go with remote_protocol
622
+ fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict))
623
+ Directly provide a file system(s):
624
+ - a single filesystem instance
625
+ - a dict of protocol:filesystem, where each value is either a filesystem
626
+ instance, or a dict of kwargs that can be used to create an
627
+ instance for the given protocol
628
+
629
+ If this is given, remote_options and remote_protocol are ignored.
630
+ template_overrides : dict
631
+ Swap out any templates in the references file with these - useful for
632
+ testing.
633
+ simple_templates: bool
634
+ Whether templates can be processed with simple replace (True) or if
635
+ jinja is needed (False, much slower). All reference sets produced by
636
+ ``kerchunk`` are simple in this sense, but the spec allows for complex.
637
+ max_gap, max_block: int
638
+ For merging multiple concurrent requests to the same remote file.
639
+ Neighboring byte ranges will only be merged when their
640
+ inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0
641
+ to only merge when it requires no extra bytes. Pass a negative
642
+ number to disable merging, appropriate for local target files.
643
+ Neighboring byte ranges will only be merged when the size of
644
+ the aggregated range is <= ``max_block``. Default is 256MB.
645
+ cache_size : int
646
+ Maximum size of LRU cache, where cache_size*record_size denotes
647
+ the total number of references that can be loaded in memory at once.
648
+ Only used for lazily loaded references.
649
+ kwargs : passed to parent class
650
+ """
651
+ super().__init__(**kwargs)
652
+ self.target = target
653
+ self.template_overrides = template_overrides
654
+ self.simple_templates = simple_templates
655
+ self.templates = {}
656
+ self.fss = {}
657
+ self._dircache = {}
658
+ self.max_gap = max_gap
659
+ self.max_block = max_block
660
+ if isinstance(fo, str):
661
+ dic = dict(
662
+ **(ref_storage_args or target_options or {}), protocol=target_protocol
663
+ )
664
+ ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic)
665
+ if ref_fs.isfile(fo2):
666
+ # text JSON
667
+ with fsspec.open(fo, "rb", **dic) as f:
668
+ logger.info("Read reference from URL %s", fo)
669
+ text = json.load(f)
670
+ self._process_references(text, template_overrides)
671
+ else:
672
+ # Lazy parquet refs
673
+ logger.info("Open lazy reference dict from URL %s", fo)
674
+ self.references = LazyReferenceMapper(
675
+ fo2,
676
+ fs=ref_fs,
677
+ cache_size=cache_size,
678
+ )
679
+ else:
680
+ # dictionaries
681
+ self._process_references(fo, template_overrides)
682
+ if isinstance(fs, dict):
683
+ self.fss = {
684
+ k: (
685
+ fsspec.filesystem(k.split(":", 1)[0], **opts)
686
+ if isinstance(opts, dict)
687
+ else opts
688
+ )
689
+ for k, opts in fs.items()
690
+ }
691
+ if None not in self.fss:
692
+ self.fss[None] = filesystem("file")
693
+ return
694
+ if fs is not None:
695
+ # single remote FS
696
+ remote_protocol = (
697
+ fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol
698
+ )
699
+ self.fss[remote_protocol] = fs
700
+
701
+ if remote_protocol is None:
702
+ # get single protocol from any templates
703
+ for ref in self.templates.values():
704
+ if callable(ref):
705
+ ref = ref()
706
+ protocol, _ = fsspec.core.split_protocol(ref)
707
+ if protocol and protocol not in self.fss:
708
+ fs = filesystem(protocol, **(remote_options or {}))
709
+ self.fss[protocol] = fs
710
+ if remote_protocol is None:
711
+ # get single protocol from references
712
+ # TODO: warning here, since this can be very expensive?
713
+ for ref in self.references.values():
714
+ if callable(ref):
715
+ ref = ref()
716
+ if isinstance(ref, list) and ref[0]:
717
+ protocol, _ = fsspec.core.split_protocol(ref[0])
718
+ if protocol not in self.fss:
719
+ fs = filesystem(protocol, **(remote_options or {}))
720
+ self.fss[protocol] = fs
721
+ # only use first remote URL
722
+ break
723
+
724
+ if remote_protocol and remote_protocol not in self.fss:
725
+ fs = filesystem(remote_protocol, **(remote_options or {}))
726
+ self.fss[remote_protocol] = fs
727
+
728
+ self.fss[None] = fs or filesystem("file") # default one
729
+
730
+ def _cat_common(self, path, start=None, end=None):
731
+ path = self._strip_protocol(path)
732
+ logger.debug(f"cat: {path}")
733
+ try:
734
+ part = self.references[path]
735
+ except KeyError:
736
+ raise FileNotFoundError(path)
737
+ if isinstance(part, str):
738
+ part = part.encode()
739
+ if isinstance(part, bytes):
740
+ logger.debug(f"Reference: {path}, type bytes")
741
+ if part.startswith(b"base64:"):
742
+ part = base64.b64decode(part[7:])
743
+ return part, None, None
744
+
745
+ if len(part) == 1:
746
+ logger.debug(f"Reference: {path}, whole file => {part}")
747
+ url = part[0]
748
+ start1, end1 = start, end
749
+ else:
750
+ url, start0, size = part
751
+ logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}")
752
+ end0 = start0 + size
753
+
754
+ if start is not None:
755
+ if start >= 0:
756
+ start1 = start0 + start
757
+ else:
758
+ start1 = end0 + start
759
+ else:
760
+ start1 = start0
761
+ if end is not None:
762
+ if end >= 0:
763
+ end1 = start0 + end
764
+ else:
765
+ end1 = end0 + end
766
+ else:
767
+ end1 = end0
768
+ if url is None:
769
+ url = self.target
770
+ return url, start1, end1
771
+
772
+ async def _cat_file(self, path, start=None, end=None, **kwargs):
773
+ part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
774
+ if isinstance(part_or_url, bytes):
775
+ return part_or_url[start:end]
776
+ protocol, _ = split_protocol(part_or_url)
777
+ try:
778
+ return await self.fss[protocol]._cat_file(part_or_url, start=start0, end=end0)
779
+ except Exception as e:
780
+ raise ReferenceNotReachable(path, part_or_url) from e
781
+
782
+ def cat_file(self, path, start=None, end=None, **kwargs):
783
+ part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
784
+ if isinstance(part_or_url, bytes):
785
+ return part_or_url[start:end]
786
+ protocol, _ = split_protocol(part_or_url)
787
+ try:
788
+ return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0)
789
+ except Exception as e:
790
+ raise ReferenceNotReachable(path, part_or_url) from e
791
+
792
+ def pipe_file(self, path, value, **_):
793
+ """Temporarily add binary data or reference as a file"""
794
+ self.references[path] = value
795
+
796
+ async def _get_file(self, rpath, lpath, **kwargs):
797
+ if self.isdir(rpath):
798
+ return os.makedirs(lpath, exist_ok=True)
799
+ data = await self._cat_file(rpath)
800
+ with open(lpath, "wb") as f:
801
+ f.write(data)
802
+
803
+ def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, **kwargs):
804
+ if self.isdir(rpath):
805
+ return os.makedirs(lpath, exist_ok=True)
806
+ data = self.cat_file(rpath, **kwargs)
807
+ callback.set_size(len(data))
808
+ if isfilelike(lpath):
809
+ lpath.write(data)
810
+ else:
811
+ with open(lpath, "wb") as f:
812
+ f.write(data)
813
+ callback.absolute_update(len(data))
814
+
815
+ def get(self, rpath, lpath, recursive=False, **kwargs):
816
+ if recursive:
817
+ # trigger directory build
818
+ self.ls("")
819
+ rpath = self.expand_path(rpath, recursive=recursive)
820
+ fs = fsspec.filesystem("file", auto_mkdir=True)
821
+ targets = other_paths(rpath, lpath)
822
+ if recursive:
823
+ data = self.cat([r for r in rpath if not self.isdir(r)])
824
+ else:
825
+ data = self.cat(rpath)
826
+ for remote, local in zip(rpath, targets):
827
+ if remote in data:
828
+ fs.pipe_file(local, data[remote])
829
+
830
+ def cat(self, path, recursive=False, on_error="raise", **kwargs):
831
+ if isinstance(path, str) and recursive:
832
+ raise NotImplementedError
833
+ if isinstance(path, list) and (recursive or any("*" in p for p in path)):
834
+ raise NotImplementedError
835
+ # TODO: if references is lazy, pre-fetch all paths in batch before access
836
+ proto_dict = _protocol_groups(path, self.references)
837
+ out = {}
838
+ for proto, paths in proto_dict.items():
839
+ fs = self.fss[proto]
840
+ urls, starts, ends, valid_paths = [], [], [], []
841
+ for p in paths:
842
+ # find references or label not-found. Early exit if any not
843
+ # found and on_error is "raise"
844
+ try:
845
+ u, s, e = self._cat_common(p)
846
+ except FileNotFoundError as err:
847
+ if on_error == "raise":
848
+ raise
849
+ if on_error != "omit":
850
+ out[p] = err
851
+ else:
852
+ urls.append(u)
853
+ starts.append(s)
854
+ ends.append(e)
855
+ valid_paths.append(p)
856
+
857
+ # process references into form for merging
858
+ urls2 = []
859
+ starts2 = []
860
+ ends2 = []
861
+ paths2 = []
862
+ whole_files = set()
863
+ for u, s, e, p in zip(urls, starts, ends, valid_paths):
864
+ if isinstance(u, bytes):
865
+ # data
866
+ out[p] = u
867
+ elif s is None:
868
+ # whole file - limits are None, None, but no further
869
+ # entries take for this file
870
+ whole_files.add(u)
871
+ urls2.append(u)
872
+ starts2.append(s)
873
+ ends2.append(e)
874
+ paths2.append(p)
875
+ for u, s, e, p in zip(urls, starts, ends, valid_paths):
876
+ # second run to account for files that are to be loaded whole
877
+ if s is not None and u not in whole_files:
878
+ urls2.append(u)
879
+ starts2.append(s)
880
+ ends2.append(e)
881
+ paths2.append(p)
882
+
883
+ # merge and fetch consolidated ranges
884
+ new_paths, new_starts, new_ends = merge_offset_ranges(
885
+ list(urls2),
886
+ list(starts2),
887
+ list(ends2),
888
+ sort=True,
889
+ max_gap=self.max_gap,
890
+ max_block=self.max_block,
891
+ )
892
+ bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends)
893
+
894
+ # unbundle from merged bytes - simple approach
895
+ for u, s, e, p in zip(urls, starts, ends, valid_paths):
896
+ if p in out:
897
+ continue # was bytes, already handled
898
+ for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out):
899
+ if np == u and (ns is None or ne is None):
900
+ if isinstance(b, Exception):
901
+ out[p] = b
902
+ else:
903
+ out[p] = b[s:e]
904
+ elif np == u and s >= ns and e <= ne:
905
+ if isinstance(b, Exception):
906
+ out[p] = b
907
+ else:
908
+ out[p] = b[s - ns : (e - ne) or None]
909
+
910
+ for k, v in out.copy().items():
911
+ # these were valid references, but fetch failed, so transform exc
912
+ if isinstance(v, Exception) and k in self.references:
913
+ ex = out[k]
914
+ new_ex = ReferenceNotReachable(k, self.references[k])
915
+ new_ex.__cause__ = ex
916
+ if on_error == "raise":
917
+ raise new_ex
918
+ elif on_error != "omit":
919
+ out[k] = new_ex
920
+
921
+ if len(out) == 1 and isinstance(path, str) and "*" not in path:
922
+ return _first(out)
923
+ return out
924
+
925
+ def _process_references(self, references, template_overrides=None):
926
+ vers = references.get("version", None)
927
+ if vers is None:
928
+ self._process_references0(references)
929
+ elif vers == 1:
930
+ self._process_references1(references, template_overrides=template_overrides)
931
+ else:
932
+ raise ValueError(f"Unknown reference spec version: {vers}")
933
+ # TODO: we make dircache by iterating over all entries, but for Spec >= 1,
934
+ # can replace with programmatic. Is it even needed for mapper interface?
935
+
936
+ def _process_references0(self, references):
937
+ """Make reference dict for Spec Version 0"""
938
+ self.references = references
939
+
940
+ def _process_references1(self, references, template_overrides=None):
941
+ if not self.simple_templates or self.templates:
942
+ import jinja2
943
+ self.references = {}
944
+ self._process_templates(references.get("templates", {}))
945
+
946
+ @lru_cache(1000)
947
+ def _render_jinja(u):
948
+ return jinja2.Template(u).render(**self.templates)
949
+
950
+ for k, v in references.get("refs", {}).items():
951
+ if isinstance(v, str):
952
+ if v.startswith("base64:"):
953
+ self.references[k] = base64.b64decode(v[7:])
954
+ self.references[k] = v
955
+ elif self.templates:
956
+ u = v[0]
957
+ if "{{" in u:
958
+ if self.simple_templates:
959
+ u = (
960
+ u.replace("{{", "{")
961
+ .replace("}}", "}")
962
+ .format(**self.templates)
963
+ )
964
+ else:
965
+ u = _render_jinja(u)
966
+ self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]]
967
+ else:
968
+ self.references[k] = v
969
+ self.references.update(self._process_gen(references.get("gen", [])))
970
+
971
+ def _process_templates(self, tmp):
972
+ self.templates = {}
973
+ if self.template_overrides is not None:
974
+ tmp.update(self.template_overrides)
975
+ for k, v in tmp.items():
976
+ if "{{" in v:
977
+ import jinja2
978
+
979
+ self.templates[k] = lambda temp=v, **kwargs: jinja2.Template(
980
+ temp
981
+ ).render(**kwargs)
982
+ else:
983
+ self.templates[k] = v
984
+
985
+ def _process_gen(self, gens):
986
+ out = {}
987
+ for gen in gens:
988
+ dimension = {
989
+ k: v
990
+ if isinstance(v, list)
991
+ else range(v.get("start", 0), v["stop"], v.get("step", 1))
992
+ for k, v in gen["dimensions"].items()
993
+ }
994
+ products = (
995
+ dict(zip(dimension.keys(), values))
996
+ for values in itertools.product(*dimension.values())
997
+ )
998
+ for pr in products:
999
+ import jinja2
1000
+
1001
+ key = jinja2.Template(gen["key"]).render(**pr, **self.templates)
1002
+ url = jinja2.Template(gen["url"]).render(**pr, **self.templates)
1003
+ if ("offset" in gen) and ("length" in gen):
1004
+ offset = int(
1005
+ jinja2.Template(gen["offset"]).render(**pr, **self.templates)
1006
+ )
1007
+ length = int(
1008
+ jinja2.Template(gen["length"]).render(**pr, **self.templates)
1009
+ )
1010
+ out[key] = [url, offset, length]
1011
+ elif ("offset" in gen) ^ ("length" in gen):
1012
+ raise ValueError(
1013
+ "Both 'offset' and 'length' are required for a "
1014
+ "reference generator entry if either is provided."
1015
+ )
1016
+ else:
1017
+ out[key] = [url]
1018
+ return out
1019
+
1020
+ def _dircache_from_items(self):
1021
+ self.dircache = {"": []}
1022
+ it = self.references.items()
1023
+ for path, part in it:
1024
+ if isinstance(part, (bytes, str)):
1025
+ size = len(part)
1026
+ elif len(part) == 1:
1027
+ size = None
1028
+ else:
1029
+ _, _, size = part
1030
+ par = path.rsplit("/", 1)[0] if "/" in path else ""
1031
+ par0 = par
1032
+ subdirs = [par0]
1033
+ while par0 and par0 not in self.dircache:
1034
+ # collect parent directories
1035
+ par0 = self._parent(par0)
1036
+ subdirs.append(par0)
1037
+
1038
+ subdirs = subdirs[::-1]
1039
+ for parent, child in zip(subdirs, subdirs[1:]):
1040
+ # register newly discovered directories
1041
+ assert child not in self.dircache
1042
+ assert parent in self.dircache
1043
+ self.dircache[parent].append(
1044
+ {"name": child, "type": "directory", "size": 0}
1045
+ )
1046
+ self.dircache[child] = []
1047
+
1048
+ self.dircache[par].append({"name": path, "type": "file", "size": size})
1049
+
1050
+ def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
1051
+ data = self.cat_file(path) # load whole chunk into memory
1052
+ return io.BytesIO(data)
1053
+
1054
+ def ls(self, path, detail=True, **kwargs):
1055
+ path = self._strip_protocol(path)
1056
+ if isinstance(self.references, LazyReferenceMapper):
1057
+ try:
1058
+ return self.references.ls(path, detail)
1059
+ except KeyError:
1060
+ pass
1061
+ raise FileNotFoundError(f"'{path}' is not a known key")
1062
+ if not self.dircache:
1063
+ self._dircache_from_items()
1064
+ out = self._ls_from_cache(path)
1065
+ if out is None:
1066
+ raise FileNotFoundError(path)
1067
+ if detail:
1068
+ return out
1069
+ return [o["name"] for o in out]
1070
+
1071
+ def exists(self, path, **kwargs): # overwrite auto-sync version
1072
+ return self.isdir(path) or self.isfile(path)
1073
+
1074
+ def isdir(self, path): # overwrite auto-sync version
1075
+ if self.dircache:
1076
+ return path in self.dircache
1077
+ elif isinstance(self.references, LazyReferenceMapper):
1078
+ return path in self.references.listdir("")
1079
+ else:
1080
+ # this may be faster than building dircache for single calls, but
1081
+ # by looping will be slow for many calls; could cache it?
1082
+ return any(_.startswith(f"{path}/") for _ in self.references)
1083
+
1084
+ def isfile(self, path): # overwrite auto-sync version
1085
+ return path in self.references
1086
+
1087
+ async def _ls(self, path, detail=True, **kwargs): # calls fast sync code
1088
+ return self.ls(path, detail, **kwargs)
1089
+
1090
+ def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
1091
+ if withdirs:
1092
+ return super().find(
1093
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
1094
+ )
1095
+ if path:
1096
+ path = self._strip_protocol(path)
1097
+ r = sorted(k for k in self.references if k.startswith(path))
1098
+ else:
1099
+ r = sorted(self.references)
1100
+ if detail:
1101
+ if not self.dircache:
1102
+ self._dircache_from_items()
1103
+ return {k: self._ls_from_cache(k)[0] for k in r}
1104
+ else:
1105
+ return r
1106
+
1107
+ def info(self, path, **kwargs):
1108
+ out = self.references.get(path)
1109
+ if out is not None:
1110
+ if isinstance(out, (str, bytes)):
1111
+ # decode base64 here
1112
+ return {"name": path, "type": "file", "size": len(out)}
1113
+ elif len(out) > 1:
1114
+ return {"name": path, "type": "file", "size": out[2]}
1115
+ else:
1116
+ out0 = [{"name": path, "type": "file", "size": None}]
1117
+ else:
1118
+ out = self.ls(path, True)
1119
+ out0 = [o for o in out if o["name"] == path]
1120
+ if not out0:
1121
+ return {"name": path, "type": "directory", "size": 0}
1122
+ if out0[0]["size"] is None:
1123
+ # if this is a whole remote file, update size using remote FS
1124
+ prot, _ = split_protocol(self.references[path][0])
1125
+ out0[0]["size"] = self.fss[prot].size(self.references[path][0])
1126
+ return out0[0]
1127
+
1128
+ async def _info(self, path, **kwargs): # calls fast sync code
1129
+ return self.info(path)
1130
+
1131
+ async def _rm_file(self, path, **kwargs):
1132
+ self.references.pop(
1133
+ path, None
1134
+ ) # ignores FileNotFound, just as well for directories
1135
+ self.dircache.clear() # this is a bit heavy handed
1136
+
1137
+ async def _pipe_file(self, path, data):
1138
+ # can be str or bytes
1139
+ self.references[path] = data
1140
+ self.dircache.clear() # this is a bit heavy handed
1141
+
1142
+ async def _put_file(self, lpath, rpath, **kwargs):
1143
+ # puts binary
1144
+ with open(lpath, "rb") as f:
1145
+ self.references[rpath] = f.read()
1146
+ self.dircache.clear() # this is a bit heavy handed
1147
+
1148
+ def save_json(self, url, **storage_options):
1149
+ """Write modified references into new location"""
1150
+ out = {}
1151
+ for k, v in self.references.items():
1152
+ if isinstance(v, bytes):
1153
+ try:
1154
+ out[k] = v.decode("ascii")
1155
+ except UnicodeDecodeError:
1156
+ out[k] = (b"base64:" + base64.b64encode(v)).decode()
1157
+ else:
1158
+ out[k] = v
1159
+ with fsspec.open(url, "wb", **storage_options) as f:
1160
+ f.write(json.dumps({"version": 1, "refs": out}).encode())
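
As a quick check of the reference filesystem above, here is a minimal sketch using an in-memory version-0 reference dict that mixes raw bytes with a (url, offset, size) range over a local target file (all paths are illustrative assumptions):

import fsspec

with open("/tmp/target.bin", "wb") as f:   # local file to point byte ranges at
    f.write(b"0123456789")

refs = {
    "inline": b"bytes stored directly in the reference set",
    "slice": ["/tmp/target.bin", 2, 5],    # url, offset, size
}
fs = fsspec.filesystem("reference", fo=refs)
print(fs.cat("inline"))   # the embedded bytes, returned as-is
print(fs.cat("slice"))    # b"23456": 5 bytes starting at offset 2 of the target
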
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/smb.py ADDED
@@ -0,0 +1,324 @@
1
+ """
2
+ This module contains SMBFileSystem class responsible for handling access to
3
+ Windows Samba network shares by using package smbprotocol
4
+ """
5
+
6
+ import datetime
7
+ import uuid
8
+ from stat import S_ISDIR, S_ISLNK
9
+
10
+ import smbclient
11
+
12
+ from .. import AbstractFileSystem
13
+ from ..utils import infer_storage_options
14
+
15
+ # ! pylint: disable=bad-continuation
16
+
17
+
18
+ class SMBFileSystem(AbstractFileSystem):
19
+ """Allow reading and writing to Windows and Samba network shares.
20
+
21
+ When using `fsspec.open()` for getting a file-like object the URI
22
+ should be specified as this format:
23
+ ``smb://workgroup;user:password@server:port/share/folder/file.csv``.
24
+
25
+ Example::
26
+
27
+ >>> import fsspec
28
+ >>> with fsspec.open(
29
+ ... 'smb://myuser:mypassword@myhost.com/' 'share/folder/file.csv'
30
+ ... ) as smbfile:
31
+ ... df = pd.read_csv(smbfile, sep='|', header=None)
32
+
33
+ Note that you need to pass in a valid hostname or IP address for the host
34
+ component of the URL. Do not use the Windows/NetBIOS machine name for the
35
+ host component.
36
+
37
+ The first component of the path in the URL points to the name of the shared
38
+ folder. Subsequent path components will point to the directory/folder/file.
39
+
40
+ The URL components ``workgroup`` , ``user``, ``password`` and ``port`` may be
41
+ optional.
42
+
43
+ .. note::
44
+
45
+ For working this source require `smbprotocol`_ to be installed, e.g.::
46
+
47
+ $ pip install smbprotocol
48
+ # or
49
+ # pip install smbprotocol[kerberos]
50
+
51
+ .. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements
52
+
53
+ Note: if using this with the ``open`` or ``open_files``, with full URLs,
54
+ there is no way to tell if a path is relative, so all paths are assumed
55
+ to be absolute.
56
+ """
57
+
58
+ protocol = "smb"
59
+
60
+ # pylint: disable=too-many-arguments
61
+ def __init__(
62
+ self,
63
+ host,
64
+ port=None,
65
+ username=None,
66
+ password=None,
67
+ timeout=60,
68
+ encrypt=None,
69
+ share_access=None,
70
+ **kwargs,
71
+ ):
72
+ """
73
+ You can use _get_kwargs_from_urls to get some kwargs from
74
+ a reasonable SMB url.
75
+
76
+ Authentication will be anonymous or integrated if username/password are not
77
+ given.
78
+
79
+ Parameters
80
+ ----------
81
+ host: str
82
+ The remote server name/ip to connect to
83
+ port: int or None
84
+ Port to connect with. Usually 445, sometimes 139.
85
+ username: str or None
86
+ Username to connect with. Required if Kerberos auth is not being used.
87
+ password: str or None
88
+ User's password on the server, if using username
89
+ timeout: int
90
+ Connection timeout in seconds
91
+ encrypt: bool
92
+ Whether to force encryption or not, once this has been set to True
93
+ the session cannot be changed back to False.
94
+ share_access: str or None
95
+ Specifies the default access applied to file open operations
96
+ performed with this file system object.
97
+ This affects whether other processes can concurrently open a handle
98
+ to the same file.
99
+
100
+ - None (the default): exclusively locks the file until closed.
101
+ - 'r': Allow other handles to be opened with read access.
102
+ - 'w': Allow other handles to be opened with write access.
103
+ - 'd': Allow other handles to be opened with delete access.
104
+ """
105
+ super().__init__(**kwargs)
106
+ self.host = host
107
+ self.port = port
108
+ self.username = username
109
+ self.password = password
110
+ self.timeout = timeout
111
+ self.encrypt = encrypt
112
+ self.temppath = kwargs.pop("temppath", "")
113
+ self.share_access = share_access
114
+ self._connect()
115
+
116
+ @property
117
+ def _port(self):
118
+ return 445 if self.port is None else self.port
119
+
120
+ def _connect(self):
121
+ smbclient.register_session(
122
+ self.host,
123
+ username=self.username,
124
+ password=self.password,
125
+ port=self._port,
126
+ encrypt=self.encrypt,
127
+ connection_timeout=self.timeout,
128
+ )
129
+
130
+ @classmethod
131
+ def _strip_protocol(cls, path):
132
+ return infer_storage_options(path)["path"]
133
+
134
+ @staticmethod
135
+ def _get_kwargs_from_urls(path):
136
+ # smb://workgroup;user:password@host:port/share/folder/file.csv
137
+ out = infer_storage_options(path)
138
+ out.pop("path", None)
139
+ out.pop("protocol", None)
140
+ return out
141
+
142
+ def mkdir(self, path, create_parents=True, **kwargs):
143
+ wpath = _as_unc_path(self.host, path)
144
+ if create_parents:
145
+ smbclient.makedirs(wpath, exist_ok=False, port=self._port, **kwargs)
146
+ else:
147
+ smbclient.mkdir(wpath, port=self._port, **kwargs)
148
+
149
+ def makedirs(self, path, exist_ok=False):
150
+ if _share_has_path(path):
151
+ wpath = _as_unc_path(self.host, path)
152
+ smbclient.makedirs(wpath, exist_ok=exist_ok, port=self._port)
153
+
154
+ def rmdir(self, path):
155
+ if _share_has_path(path):
156
+ wpath = _as_unc_path(self.host, path)
157
+ smbclient.rmdir(wpath, port=self._port)
158
+
159
+ def info(self, path, **kwargs):
160
+ wpath = _as_unc_path(self.host, path)
161
+ stats = smbclient.stat(wpath, port=self._port, **kwargs)
162
+ if S_ISDIR(stats.st_mode):
163
+ stype = "directory"
164
+ elif S_ISLNK(stats.st_mode):
165
+ stype = "link"
166
+ else:
167
+ stype = "file"
168
+ res = {
169
+ "name": path + "/" if stype == "directory" else path,
170
+ "size": stats.st_size,
171
+ "type": stype,
172
+ "uid": stats.st_uid,
173
+ "gid": stats.st_gid,
174
+ "time": stats.st_atime,
175
+ "mtime": stats.st_mtime,
176
+ }
177
+ return res
178
+
179
+ def created(self, path):
180
+ """Return the created timestamp of a file as a datetime.datetime"""
181
+ wpath = _as_unc_path(self.host, path)
182
+ stats = smbclient.stat(wpath, port=self._port)
183
+ return datetime.datetime.fromtimestamp(stats.st_ctime, tz=datetime.timezone.utc)
184
+
185
+ def modified(self, path):
186
+ """Return the modified timestamp of a file as a datetime.datetime"""
187
+ wpath = _as_unc_path(self.host, path)
188
+ stats = smbclient.stat(wpath, port=self._port)
189
+ return datetime.datetime.fromtimestamp(stats.st_mtime, tz=datetime.timezone.utc)
190
+
191
+ def ls(self, path, detail=True, **kwargs):
192
+ unc = _as_unc_path(self.host, path)
193
+ listed = smbclient.listdir(unc, port=self._port, **kwargs)
194
+ dirs = ["/".join([path.rstrip("/"), p]) for p in listed]
195
+ if detail:
196
+ dirs = [self.info(d) for d in dirs]
197
+ return dirs
198
+
199
+ # pylint: disable=too-many-arguments
200
+ def _open(
201
+ self,
202
+ path,
203
+ mode="rb",
204
+ block_size=-1,
205
+ autocommit=True,
206
+ cache_options=None,
207
+ **kwargs,
208
+ ):
209
+ """
210
+ block_size: int or None
211
+ If 0, no buffering, 1, line buffering, >1, buffer that many bytes
212
+
213
+ Notes
214
+ -----
215
+ By specifying 'share_access' in 'kwargs' it is possible to override the
216
+ default shared access setting applied in the constructor of this object.
217
+ """
218
+ bls = block_size if block_size is not None and block_size >= 0 else -1
219
+ wpath = _as_unc_path(self.host, path)
220
+ share_access = kwargs.pop("share_access", self.share_access)
221
+ if "w" in mode and autocommit is False:
222
+ temp = _as_temp_path(self.host, path, self.temppath)
223
+ return SMBFileOpener(
224
+ wpath, temp, mode, port=self._port, block_size=bls, **kwargs
225
+ )
226
+ return smbclient.open_file(
227
+ wpath,
228
+ mode,
229
+ buffering=bls,
230
+ share_access=share_access,
231
+ port=self._port,
232
+ **kwargs,
233
+ )
234
+
235
+ def copy(self, path1, path2, **kwargs):
236
+ """Copy within two locations in the same filesystem"""
237
+ wpath1 = _as_unc_path(self.host, path1)
238
+ wpath2 = _as_unc_path(self.host, path2)
239
+ smbclient.copyfile(wpath1, wpath2, port=self._port, **kwargs)
240
+
241
+ def _rm(self, path):
242
+ if _share_has_path(path):
243
+ wpath = _as_unc_path(self.host, path)
244
+ stats = smbclient.stat(wpath, port=self._port)
245
+ if S_ISDIR(stats.st_mode):
246
+ smbclient.rmdir(wpath, port=self._port)
247
+ else:
248
+ smbclient.remove(wpath, port=self._port)
249
+
250
+ def mv(self, path1, path2, recursive=None, maxdepth=None, **kwargs):
251
+ wpath1 = _as_unc_path(self.host, path1)
252
+ wpath2 = _as_unc_path(self.host, path2)
253
+ smbclient.rename(wpath1, wpath2, port=self._port, **kwargs)
254
+
255
+
256
+ def _as_unc_path(host, path):
257
+ rpath = path.replace("/", "\\")
258
+ unc = f"\\\\{host}{rpath}"
259
+ return unc
260
+
261
+
262
+ def _as_temp_path(host, path, temppath):
263
+ share = path.split("/")[1]
264
+ temp_file = f"/{share}{temppath}/{uuid.uuid4()}"
265
+ unc = _as_unc_path(host, temp_file)
266
+ return unc
267
+
268
+
269
+ def _share_has_path(path):
270
+ parts = path.count("/")
271
+ if path.endswith("/"):
272
+ return parts > 2
273
+ return parts > 1
274
+
275
+
276
+ class SMBFileOpener:
277
+ """writes to remote temporary file, move on commit"""
278
+
279
+ def __init__(self, path, temp, mode, port=445, block_size=-1, **kwargs):
280
+ self.path = path
281
+ self.temp = temp
282
+ self.mode = mode
283
+ self.block_size = block_size
284
+ self.kwargs = kwargs
285
+ self.smbfile = None
286
+ self._incontext = False
287
+ self.port = port
288
+ self._open()
289
+
290
+ def _open(self):
291
+ if self.smbfile is None or self.smbfile.closed:
292
+ self.smbfile = smbclient.open_file(
293
+ self.temp,
294
+ self.mode,
295
+ port=self.port,
296
+ buffering=self.block_size,
297
+ **self.kwargs,
298
+ )
299
+
300
+ def commit(self):
301
+ """Move temp file to definitive on success."""
302
+ # TODO: use transaction support in SMB protocol
303
+ smbclient.replace(self.temp, self.path, port=self.port)
304
+
305
+ def discard(self):
306
+ """Remove the temp file on failure."""
307
+ smbclient.remove(self.temp, port=self.port)
308
+
309
+ def __fspath__(self):
310
+ return self.path
311
+
312
+ def __iter__(self):
313
+ return self.smbfile.__iter__()
314
+
315
+ def __getattr__(self, item):
316
+ return getattr(self.smbfile, item)
317
+
318
+ def __enter__(self):
319
+ self._incontext = True
320
+ return self.smbfile.__enter__()
321
+
322
+ def __exit__(self, exc_type, exc_value, traceback):
323
+ self._incontext = False
324
+ self.smbfile.__exit__(exc_type, exc_value, traceback)
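
The module-level helpers above do the path translation for smbclient; a small sketch of what they produce (host and share names are illustrative, and importing the module requires ``smbprotocol`` to be installed):

from fsspec.implementations.smb import _as_unc_path, _share_has_path

print(_as_unc_path("myserver", "/share/folder/file.csv"))
# \\myserver\share\folder\file.csv
print(_share_has_path("/share"))          # False: just the share root
print(_share_has_path("/share/folder"))   # True: a path inside the share
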
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/tar.py ADDED
@@ -0,0 +1,124 @@
1
+ import logging
2
+ import tarfile
3
+
4
+ import fsspec
5
+ from fsspec.archive import AbstractArchiveFileSystem
6
+ from fsspec.compression import compr
7
+ from fsspec.utils import infer_compression
8
+
9
+ typemap = {b"0": "file", b"5": "directory"}
10
+
11
+ logger = logging.getLogger("tar")
12
+
13
+
14
+ class TarFileSystem(AbstractArchiveFileSystem):
15
+ """Compressed Tar archives as a file-system (read-only)
16
+
17
+ Supports the following formats:
18
+ tar.gz, tar.bz2, tar.xz
19
+ """
20
+
21
+ root_marker = ""
22
+ protocol = "tar"
23
+ cachable = False
24
+
25
+ def __init__(
26
+ self,
27
+ fo="",
28
+ index_store=None,
29
+ target_options=None,
30
+ target_protocol=None,
31
+ compression=None,
32
+ **kwargs,
33
+ ):
34
+ super().__init__(**kwargs)
35
+ target_options = target_options or {}
36
+
37
+ if isinstance(fo, str):
38
+ self.of = fsspec.open(fo, protocol=target_protocol, **target_options)
39
+ fo = self.of.open() # keep the reference
40
+
41
+ # Try to infer compression.
42
+ if compression is None:
43
+ name = None
44
+
45
+ # Try different ways to get hold of the filename. `fo` might either
46
+ # be a `fsspec.LocalFileOpener`, an `io.BufferedReader` or an
47
+ # `fsspec.AbstractFileSystem` instance.
48
+ try:
49
+ # Amended io.BufferedReader or similar.
50
+ # This uses a "protocol extension" where original filenames are
51
+ # propagated to archive-like filesystems in order to let them
52
+ # infer the right compression appropriately.
53
+ if hasattr(fo, "original"):
54
+ name = fo.original
55
+
56
+ # fsspec.LocalFileOpener
57
+ elif hasattr(fo, "path"):
58
+ name = fo.path
59
+
60
+ # io.BufferedReader
61
+ elif hasattr(fo, "name"):
62
+ name = fo.name
63
+
64
+ # fsspec.AbstractFileSystem
65
+ elif hasattr(fo, "info"):
66
+ name = fo.info()["name"]
67
+
68
+ except Exception as ex:
69
+ logger.warning(
70
+ f"Unable to determine file name, not inferring compression: {ex}"
71
+ )
72
+
73
+ if name is not None:
74
+ compression = infer_compression(name)
75
+ logger.info(f"Inferred compression {compression} from file name {name}")
76
+
77
+ if compression is not None:
78
+ # TODO: tarfile already implements compression with modes like "'r:gz'",
79
+ # but then would seek to offset in the file work?
80
+ fo = compr[compression](fo)
81
+
82
+ self._fo_ref = fo
83
+ self.fo = fo # the whole instance is a context
84
+ self.tar = tarfile.TarFile(fileobj=self.fo)
85
+ self.dir_cache = None
86
+
87
+ self.index_store = index_store
88
+ self.index = None
89
+ self._index()
90
+
91
+ def _index(self):
92
+ # TODO: load and set saved index, if exists
93
+ out = {}
94
+ for ti in self.tar:
95
+ info = ti.get_info()
96
+ info["type"] = typemap.get(info["type"], "file")
97
+ name = ti.get_info()["name"].rstrip("/")
98
+ out[name] = (info, ti.offset_data)
99
+
100
+ self.index = out
101
+ # TODO: save index to self.index_store here, if set
102
+
103
+ def _get_dirs(self):
104
+ if self.dir_cache is not None:
105
+ return
106
+
107
+ # This enables ls to get directories as children as well as files
108
+ self.dir_cache = {
109
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
110
+ for dirname in self._all_dirnames(self.tar.getnames())
111
+ }
112
+ for member in self.tar.getmembers():
113
+ info = member.get_info()
114
+ info["name"] = info["name"].rstrip("/")
115
+ info["type"] = typemap.get(info["type"], "file")
116
+ self.dir_cache[info["name"]] = info
117
+
118
+ def _open(self, path, mode="rb", **kwargs):
119
+ if mode != "rb":
120
+ raise ValueError("Read-only filesystem implementation")
121
+ details, offset = self.index[path]
122
+ if details["type"] != "file":
123
+ raise ValueError("Can only handle regular files")
124
+ return self.tar.extractfile(path)
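
A short usage sketch for the read-only tar filesystem above (the archive name and member path are illustrative; compression is inferred from the file extension when not given explicitly):

import fsspec

fs = fsspec.filesystem("tar", fo="data.tar.gz")
print(fs.ls("/", detail=False))                 # top-level members of the archive
with fs.open("folder/member.txt", "rb") as f:   # hypothetical member path
    print(f.read())
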
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/webhdfs.py ADDED
@@ -0,0 +1,486 @@
1
+ # https://hadoop.apache.org/docs/r1.0.4/webhdfs.html
2
+
3
+ import logging
4
+ import os
5
+ import secrets
6
+ import shutil
7
+ import tempfile
8
+ import uuid
9
+ from contextlib import suppress
10
+ from urllib.parse import quote
11
+
12
+ import requests
13
+
14
+ from ..spec import AbstractBufferedFile, AbstractFileSystem
15
+ from ..utils import infer_storage_options, tokenize
16
+
17
+ logger = logging.getLogger("webhdfs")
18
+
19
+
20
+ class WebHDFS(AbstractFileSystem):
21
+ """
22
+ Interface to HDFS over HTTP using the WebHDFS API; also supports HttpFS gateways.
23
+
24
+ Four auth mechanisms are supported:
25
+
26
+ insecure: no auth is done, and the user is assumed to be whoever they
27
+ say they are (parameter ``user``), or a predefined value such as
28
+ "dr.who" if not given
29
+ spnego: when kerberos authentication is enabled, auth is negotiated by
30
+ requests_kerberos https://github.com/requests/requests-kerberos .
31
+ This establishes a session based on existing kinit login and/or
32
+ specified principal/password; parameters are passed with ``kerb_kwargs``
33
+ token: uses an existing Hadoop delegation token from another secured
34
+ service. Indeed, this client can also generate such tokens when
35
+ not insecure. Note that tokens expire, but can be renewed (by a
36
+ previously specified user) and may allow for proxying.
37
+ basic-auth: used when both parameter ``user`` and parameter ``password``
38
+ are provided.
39
+
40
+ """
41
+
42
+ tempdir = str(tempfile.gettempdir())
43
+ protocol = "webhdfs", "webHDFS"
44
+
45
+ def __init__(
46
+ self,
47
+ host,
48
+ port=50070,
49
+ kerberos=False,
50
+ token=None,
51
+ user=None,
52
+ password=None,
53
+ proxy_to=None,
54
+ kerb_kwargs=None,
55
+ data_proxy=None,
56
+ use_https=False,
57
+ session_cert=None,
58
+ session_verify=True,
59
+ **kwargs,
60
+ ):
61
+ """
62
+ Parameters
63
+ ----------
64
+ host: str
65
+ Name-node address
66
+ port: int
67
+ Port for webHDFS
68
+ kerberos: bool
69
+ Whether to authenticate with kerberos for this connection
70
+ token: str or None
71
+ If given, use this token on every call to authenticate. A user
72
+ and user-proxy may be encoded in the token and should not be also
73
+ given
74
+ user: str or None
75
+ If given, assert the user name to connect with
76
+ password: str or None
77
+ If given, assert the password to use for basic auth. If password
78
+ is provided, user must be provided also
79
+ proxy_to: str or None
80
+ If given, the user has the authority to proxy, and this value is
81
+ the user in whose name actions are taken
82
+ kerb_kwargs: dict
83
+ Any extra arguments for HTTPKerberosAuth, see
84
+ `<https://github.com/requests/requests-kerberos/blob/master/requests_kerberos/kerberos_.py>`_
85
+ data_proxy: dict, callable or None
86
+ If given, map data-node addresses. This can be necessary if the
87
+ HDFS cluster is behind a proxy, running on Docker or otherwise has
88
+ a mismatch between the host-names given by the name-node and the
89
+ address by which to refer to them from the client. If a dict,
90
+ maps host names ``host->data_proxy[host]``; if a callable, full
91
+ URLs are passed, and function must conform to
92
+ ``url->data_proxy(url)``.
93
+ use_https: bool
94
+ Whether to connect to the Name-node using HTTPS instead of HTTP
95
+ session_cert: str or Tuple[str, str] or None
96
+ Path to a certificate file, or tuple of (cert, key) files to use
97
+ for the requests.Session
98
+ session_verify: str, bool or None
99
+ Path to a certificate file to use for verifying the requests.Session.
100
+ kwargs
101
+ """
102
+ if self._cached:
103
+ return
104
+ super().__init__(**kwargs)
105
+ self.url = (
106
+ f"{'https' if use_https else 'http'}://{host}:{port}/webhdfs/v1" # noqa
107
+ )
108
+ self.kerb = kerberos
109
+ self.kerb_kwargs = kerb_kwargs or {}
110
+ self.pars = {}
111
+ self.proxy = data_proxy or {}
112
+ if token is not None:
113
+ if user is not None or proxy_to is not None:
114
+ raise ValueError(
115
+ "If passing a delegation token, must not set "
116
+ "user or proxy_to, as these are encoded in the"
117
+ " token"
118
+ )
119
+ self.pars["delegation"] = token
120
+ self.user = user
121
+ self.password = password
122
+
123
+ if password is not None:
124
+ if user is None:
125
+ raise ValueError(
126
+ "If passing a password, the user must also be "
127
+ "set in order to set up the basic-auth"
128
+ )
129
+ else:
130
+ if user is not None:
131
+ self.pars["user.name"] = user
132
+
133
+ if proxy_to is not None:
134
+ self.pars["doas"] = proxy_to
135
+ if kerberos and user is not None:
136
+ raise ValueError(
137
+ "If using Kerberos auth, do not specify the "
138
+ "user, this is handled by kinit."
139
+ )
140
+
141
+ self.session_cert = session_cert
142
+ self.session_verify = session_verify
143
+
144
+ self._connect()
145
+
146
+ self._fsid = f"webhdfs_{tokenize(host, port)}"
147
+
148
+ @property
149
+ def fsid(self):
150
+ return self._fsid
151
+
152
+ def _connect(self):
153
+ self.session = requests.Session()
154
+
155
+ if self.session_cert:
156
+ self.session.cert = self.session_cert
157
+
158
+ self.session.verify = self.session_verify
159
+
160
+ if self.kerb:
161
+ from requests_kerberos import HTTPKerberosAuth
162
+
163
+ self.session.auth = HTTPKerberosAuth(**self.kerb_kwargs)
164
+
165
+ if self.user is not None and self.password is not None:
166
+ from requests.auth import HTTPBasicAuth
167
+
168
+ self.session.auth = HTTPBasicAuth(self.user, self.password)
169
+
170
+ def _call(self, op, method="get", path=None, data=None, redirect=True, **kwargs):
171
+ url = self._apply_proxy(self.url + quote(path or "", safe="/="))
172
+ args = kwargs.copy()
173
+ args.update(self.pars)
174
+ args["op"] = op.upper()
175
+ logger.debug("sending %s with %s", url, method)
176
+ out = self.session.request(
177
+ method=method.upper(),
178
+ url=url,
179
+ params=args,
180
+ data=data,
181
+ allow_redirects=redirect,
182
+ )
183
+ if out.status_code in [400, 401, 403, 404, 500]:
184
+ try:
185
+ err = out.json()
186
+ msg = err["RemoteException"]["message"]
187
+ exp = err["RemoteException"]["exception"]
188
+ except (ValueError, KeyError):
189
+ pass
190
+ else:
191
+ if exp in ["IllegalArgumentException", "UnsupportedOperationException"]:
192
+ raise ValueError(msg)
193
+ elif exp in ["SecurityException", "AccessControlException"]:
194
+ raise PermissionError(msg)
195
+ elif exp in ["FileNotFoundException"]:
196
+ raise FileNotFoundError(msg)
197
+ else:
198
+ raise RuntimeError(msg)
199
+ out.raise_for_status()
200
+ return out
201
+
202
+ def _open(
203
+ self,
204
+ path,
205
+ mode="rb",
206
+ block_size=None,
207
+ autocommit=True,
208
+ replication=None,
209
+ permissions=None,
210
+ **kwargs,
211
+ ):
212
+ """
213
+
214
+ Parameters
215
+ ----------
216
+ path: str
217
+ File location
218
+ mode: str
219
+ 'rb', 'wb', etc.
220
+ block_size: int
221
+ Client buffer size for read-ahead or write buffer
222
+ autocommit: bool
223
+ If False, writes to temporary file that only gets put in final
224
+ location upon commit
225
+ replication: int
226
+ Number of copies of file on the cluster, write mode only
227
+ permissions: str or int
228
+ posix permissions, write mode only
229
+ kwargs
230
+
231
+ Returns
232
+ -------
233
+ WebHDFile instance
234
+ """
235
+ block_size = block_size or self.blocksize
236
+ return WebHDFile(
237
+ self,
238
+ path,
239
+ mode=mode,
240
+ block_size=block_size,
241
+ tempdir=self.tempdir,
242
+ autocommit=autocommit,
243
+ replication=replication,
244
+ permissions=permissions,
245
+ )
246
+
247
+ @staticmethod
248
+ def _process_info(info):
249
+ info["type"] = info["type"].lower()
250
+ info["size"] = info["length"]
251
+ return info
252
+
253
+ @classmethod
254
+ def _strip_protocol(cls, path):
255
+ return infer_storage_options(path)["path"]
256
+
257
+ @staticmethod
258
+ def _get_kwargs_from_urls(urlpath):
259
+ out = infer_storage_options(urlpath)
260
+ out.pop("path", None)
261
+ out.pop("protocol", None)
262
+ if "username" in out:
263
+ out["user"] = out.pop("username")
264
+ return out
265
+
266
+ def info(self, path):
267
+ out = self._call("GETFILESTATUS", path=path)
268
+ info = out.json()["FileStatus"]
269
+ info["name"] = path
270
+ return self._process_info(info)
271
+
272
+ def ls(self, path, detail=False):
273
+ out = self._call("LISTSTATUS", path=path)
274
+ infos = out.json()["FileStatuses"]["FileStatus"]
275
+ for info in infos:
276
+ self._process_info(info)
277
+ info["name"] = path.rstrip("/") + "/" + info["pathSuffix"]
278
+ if detail:
279
+ return sorted(infos, key=lambda i: i["name"])
280
+ else:
281
+ return sorted(info["name"] for info in infos)
282
+
283
+ def content_summary(self, path):
284
+ """Total numbers of files, directories and bytes under path"""
285
+ out = self._call("GETCONTENTSUMMARY", path=path)
286
+ return out.json()["ContentSummary"]
287
+
288
+ def ukey(self, path):
289
+ """Checksum info of file, giving method and result"""
290
+ out = self._call("GETFILECHECKSUM", path=path, redirect=False)
291
+ if "Location" in out.headers:
292
+ location = self._apply_proxy(out.headers["Location"])
293
+ out2 = self.session.get(location)
294
+ out2.raise_for_status()
295
+ return out2.json()["FileChecksum"]
296
+ else:
297
+ out.raise_for_status()
298
+ return out.json()["FileChecksum"]
299
+
300
+ def home_directory(self):
301
+ """Get user's home directory"""
302
+ out = self._call("GETHOMEDIRECTORY")
303
+ return out.json()["Path"]
304
+
305
+ def get_delegation_token(self, renewer=None):
306
+ """Retrieve a token which can give the same authority to other users
307
+
308
+ Parameters
309
+ ----------
310
+ renewer: str or None
311
+ User who may use this token; if None, will be current user
312
+ """
313
+ if renewer:
314
+ out = self._call("GETDELEGATIONTOKEN", renewer=renewer)
315
+ else:
316
+ out = self._call("GETDELEGATIONTOKEN")
317
+ t = out.json()["Token"]
318
+ if t is None:
319
+ raise ValueError("No token available for this user/security context")
320
+ return t["urlString"]
321
+
322
+ def renew_delegation_token(self, token):
323
+ """Make token live longer. Returns new expiry time"""
324
+ out = self._call("RENEWDELEGATIONTOKEN", method="put", token=token)
325
+ return out.json()["long"]
326
+
327
+ def cancel_delegation_token(self, token):
328
+ """Stop the token from being useful"""
329
+ self._call("CANCELDELEGATIONTOKEN", method="put", token=token)
330
+
331
+ def chmod(self, path, mod):
332
+ """Set the permission at path
333
+
334
+ Parameters
335
+ ----------
336
+ path: str
337
+ location to set (file or directory)
338
+ mod: str or int
339
+ posix representation of permission, given as an octal string, e.g. '777'
340
+ or 0o777
341
+ """
342
+ self._call("SETPERMISSION", method="put", path=path, permission=mod)
343
+
344
+ def chown(self, path, owner=None, group=None):
345
+ """Change owning user and/or group"""
346
+ kwargs = {}
347
+ if owner is not None:
348
+ kwargs["owner"] = owner
349
+ if group is not None:
350
+ kwargs["group"] = group
351
+ self._call("SETOWNER", method="put", path=path, **kwargs)
352
+
353
+ def set_replication(self, path, replication):
354
+ """
355
+ Set file replication factor
356
+
357
+ Parameters
358
+ ----------
359
+ path: str
360
+ File location (not for directories)
361
+ replication: int
362
+ Number of copies of file on the cluster. Should be smaller than
363
+ number of data nodes; normally 3 on most systems.
364
+ """
365
+ self._call("SETREPLICATION", path=path, method="put", replication=replication)
366
+
367
+ def mkdir(self, path, **kwargs):
368
+ self._call("MKDIRS", method="put", path=path)
369
+
370
+ def makedirs(self, path, exist_ok=False):
371
+ if exist_ok is False and self.exists(path):
372
+ raise FileExistsError(path)
373
+ self.mkdir(path)
374
+
375
+ def mv(self, path1, path2, **kwargs):
376
+ self._call("RENAME", method="put", path=path1, destination=path2)
377
+
378
+ def rm(self, path, recursive=False, **kwargs):
379
+ self._call(
380
+ "DELETE",
381
+ method="delete",
382
+ path=path,
383
+ recursive="true" if recursive else "false",
384
+ )
385
+
386
+ def rm_file(self, path, **kwargs):
387
+ self.rm(path)
388
+
389
+ def cp_file(self, lpath, rpath, **kwargs):
390
+ with self.open(lpath) as lstream:
391
+ tmp_fname = "/".join([self._parent(rpath), f".tmp.{secrets.token_hex(16)}"])
392
+ # Perform an atomic copy (stream to a temporary file and
393
+ # move it to the actual destination).
394
+ try:
395
+ with self.open(tmp_fname, "wb") as rstream:
396
+ shutil.copyfileobj(lstream, rstream)
397
+ self.mv(tmp_fname, rpath)
398
+ except BaseException: # noqa
399
+ with suppress(FileNotFoundError):
400
+ self.rm(tmp_fname)
401
+ raise
402
+
403
+ def _apply_proxy(self, location):
404
+ if self.proxy and callable(self.proxy):
405
+ location = self.proxy(location)
406
+ elif self.proxy:
407
+ # as a dict
408
+ for k, v in self.proxy.items():
409
+ location = location.replace(k, v, 1)
410
+ return location
411
+
412
+
413
+ class WebHDFile(AbstractBufferedFile):
414
+ """A file living in HDFS over webHDFS"""
415
+
416
+ def __init__(self, fs, path, **kwargs):
417
+ super().__init__(fs, path, **kwargs)
418
+ kwargs = kwargs.copy()
419
+ if kwargs.get("permissions", None) is None:
420
+ kwargs.pop("permissions", None)
421
+ if kwargs.get("replication", None) is None:
422
+ kwargs.pop("replication", None)
423
+ self.permissions = kwargs.pop("permissions", 511)
424
+ tempdir = kwargs.pop("tempdir")
425
+ if kwargs.pop("autocommit", False) is False:
426
+ self.target = self.path
427
+ self.path = os.path.join(tempdir, str(uuid.uuid4()))
428
+
429
+ def _upload_chunk(self, final=False):
430
+ """Write one part of a multi-block file upload
431
+
432
+ Parameters
433
+ ==========
434
+ final: bool
435
+ This is the last block, so should complete file, if
436
+ self.autocommit is True.
437
+ """
438
+ out = self.fs.session.post(
439
+ self.location,
440
+ data=self.buffer.getvalue(),
441
+ headers={"content-type": "application/octet-stream"},
442
+ )
443
+ out.raise_for_status()
444
+ return True
445
+
446
+ def _initiate_upload(self):
447
+ """Create remote file/upload"""
448
+ kwargs = self.kwargs.copy()
449
+ if "a" in self.mode:
450
+ op, method = "APPEND", "POST"
451
+ else:
452
+ op, method = "CREATE", "PUT"
453
+ kwargs["overwrite"] = "true"
454
+ out = self.fs._call(op, method, self.path, redirect=False, **kwargs)
455
+ location = self.fs._apply_proxy(out.headers["Location"])
456
+ if "w" in self.mode:
457
+ # create empty file to append to
458
+ out2 = self.fs.session.put(
459
+ location, headers={"content-type": "application/octet-stream"}
460
+ )
461
+ out2.raise_for_status()
462
+ # after creating empty file, change location to append to
463
+ out2 = self.fs._call("APPEND", "POST", self.path, redirect=False, **kwargs)
464
+ self.location = self.fs._apply_proxy(out2.headers["Location"])
465
+
466
+ def _fetch_range(self, start, end):
467
+ start = max(start, 0)
468
+ end = min(self.size, end)
469
+ if start >= end or start >= self.size:
470
+ return b""
471
+ out = self.fs._call(
472
+ "OPEN", path=self.path, offset=start, length=end - start, redirect=False
473
+ )
474
+ out.raise_for_status()
475
+ if "Location" in out.headers:
476
+ location = out.headers["Location"]
477
+ out2 = self.fs.session.get(self.fs._apply_proxy(location))
478
+ return out2.content
479
+ else:
480
+ return out.content
481
+
482
+ def commit(self):
483
+ self.fs.mv(self.path, self.target)
484
+
485
+ def discard(self):
486
+ self.fs.rm(self.path)
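A minimal usage sketch for the WebHDFS filesystem above (not part of the committed file; host, user and paths are hypothetical, and port 9870 assumes a Hadoop 3 name-node rather than the class default of 50070):

import fsspec

fs = fsspec.filesystem(
    "webhdfs",
    host="namenode.example.com",
    port=9870,
    user="alice",                                        # "insecure" auth: just asserts the user name
    data_proxy={"internal-dn": "gateway.example.com"},   # rewrite data-node host names behind a proxy
)
print(fs.ls("/user/alice", detail=False))
with fs.open("/user/alice/report.csv", "rb") as f:
    head = f.read(1024)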
env-llmeval/lib/python3.10/site-packages/fsspec/implementations/zip.py ADDED
@@ -0,0 +1,133 @@
1
+ import zipfile
2
+
3
+ import fsspec
4
+ from fsspec.archive import AbstractArchiveFileSystem
5
+
6
+
7
+ class ZipFileSystem(AbstractArchiveFileSystem):
8
+ """Read/Write contents of ZIP archive as a file-system
9
+
10
+ Keeps file object open while instance lives.
11
+
12
+ This class is pickleable, but not necessarily thread-safe
13
+ """
14
+
15
+ root_marker = ""
16
+ protocol = "zip"
17
+ cachable = False
18
+
19
+ def __init__(
20
+ self,
21
+ fo="",
22
+ mode="r",
23
+ target_protocol=None,
24
+ target_options=None,
25
+ compression=zipfile.ZIP_STORED,
26
+ allowZip64=True,
27
+ compresslevel=None,
28
+ **kwargs,
29
+ ):
30
+ """
31
+ Parameters
32
+ ----------
33
+ fo: str or file-like
34
+ Contains ZIP, and must exist. If a str, will fetch file using
35
+ :meth:`~fsspec.open_files`, which must return one file exactly.
36
+ mode: str
37
+ Accept: "r", "w", "a"
38
+ target_protocol: str (optional)
39
+ If ``fo`` is a string, this value can be used to override the
40
+ FS protocol inferred from a URL
41
+ target_options: dict (optional)
42
+ Kwargs passed when instantiating the target FS, if ``fo`` is
43
+ a string.
44
+ compression, allowZip64, compresslevel: passed to ZipFile
45
+ Only relevant when creating a ZIP
46
+ """
47
+ super().__init__(self, **kwargs)
48
+ if mode not in set("rwa"):
49
+ raise ValueError(f"mode '{mode}' not understood")
50
+ self.mode = mode
51
+ if isinstance(fo, str):
52
+ if mode == "a":
53
+ m = "r+b"
54
+ else:
55
+ m = mode + "b"
56
+ fo = fsspec.open(
57
+ fo, mode=m, protocol=target_protocol, **(target_options or {})
58
+ )
59
+ self.of = fo
60
+ self.fo = fo.__enter__() # the whole instance is a context
61
+ self.zip = zipfile.ZipFile(
62
+ self.fo,
63
+ mode=mode,
64
+ compression=compression,
65
+ allowZip64=allowZip64,
66
+ compresslevel=compresslevel,
67
+ )
68
+ self.dir_cache = None
69
+
70
+ @classmethod
71
+ def _strip_protocol(cls, path):
72
+ # zip file paths are always relative to the archive root
73
+ return super()._strip_protocol(path).lstrip("/")
74
+
75
+ def __del__(self):
76
+ if hasattr(self, "zip"):
77
+ self.close()
78
+ del self.zip
79
+
80
+ def close(self):
81
+ """Commits any write changes to the file. Done on ``del`` too."""
82
+ self.zip.close()
83
+
84
+ def _get_dirs(self):
85
+ if self.dir_cache is None or self.mode in set("wa"):
86
+ # when writing, dir_cache is always in the ZipFile's attributes,
87
+ # not read from the file.
88
+ files = self.zip.infolist()
89
+ self.dir_cache = {
90
+ dirname.rstrip("/"): {
91
+ "name": dirname.rstrip("/"),
92
+ "size": 0,
93
+ "type": "directory",
94
+ }
95
+ for dirname in self._all_dirnames(self.zip.namelist())
96
+ }
97
+ for z in files:
98
+ f = {s: getattr(z, s, None) for s in zipfile.ZipInfo.__slots__}
99
+ f.update(
100
+ {
101
+ "name": z.filename.rstrip("/"),
102
+ "size": z.file_size,
103
+ "type": ("directory" if z.is_dir() else "file"),
104
+ }
105
+ )
106
+ self.dir_cache[f["name"]] = f
107
+
108
+ def pipe_file(self, path, value, **kwargs):
109
+ # override upstream, because we know the exact file size in this case
110
+ self.zip.writestr(path, value, **kwargs)
111
+
112
+ def _open(
113
+ self,
114
+ path,
115
+ mode="rb",
116
+ block_size=None,
117
+ autocommit=True,
118
+ cache_options=None,
119
+ **kwargs,
120
+ ):
121
+ path = self._strip_protocol(path)
122
+ if "r" in mode and self.mode in set("wa"):
123
+ if self.exists(path):
124
+ raise OSError("ZipFS can only be open for reading or writing, not both")
125
+ raise FileNotFoundError(path)
126
+ if "r" in self.mode and "w" in mode:
127
+ raise OSError("ZipFS can only be open for reading or writing, not both")
128
+ out = self.zip.open(path, mode.strip("b"))
129
+ if "r" in mode:
130
+ info = self.info(path)
131
+ out.size = info["size"]
132
+ out.name = info["name"]
133
+ return out
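A minimal usage sketch for ZipFileSystem (not part of the committed file; archive and member names are hypothetical):

import fsspec

# Read an existing archive.
fs = fsspec.filesystem("zip", fo="bundle.zip")
print(fs.ls("", detail=False))
with fs.open("notes/readme.txt") as f:   # defaults to "rb"
    print(f.read())

# Create a new archive; close() writes the ZIP central directory.
out = fsspec.filesystem("zip", fo="new.zip", mode="w")
out.pipe_file("notes/readme.txt", b"hello")
out.close()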
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py ADDED
@@ -0,0 +1,287 @@
1
+ import os
2
+ from hashlib import md5
3
+
4
+ import pytest
5
+
6
+ from fsspec.implementations.local import LocalFileSystem
7
+ from fsspec.tests.abstract.copy import AbstractCopyTests # noqa
8
+ from fsspec.tests.abstract.get import AbstractGetTests # noqa
9
+ from fsspec.tests.abstract.put import AbstractPutTests # noqa
10
+
11
+
12
+ class BaseAbstractFixtures:
13
+ """
14
+ Abstract base class containing fixtures that are used by, but never need
15
+ to be overridden in, derived filesystem-specific classes that run the
16
+ abstract tests on such filesystems.
17
+ """
18
+
19
+ @pytest.fixture
20
+ def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path):
21
+ """
22
+ Scenario on remote filesystem that is used for many cp/get/put tests.
23
+
24
+ Cleans up at the end of each test in which it is used.
25
+ """
26
+ source = self._bulk_operations_scenario_0(fs, fs_join, fs_path)
27
+ yield source
28
+ fs.rm(source, recursive=True)
29
+
30
+ @pytest.fixture
31
+ def fs_glob_edge_cases_files(self, fs, fs_join, fs_path):
32
+ """
33
+ Scenario on remote filesystem that is used for glob edge cases cp/get/put tests.
34
+
35
+ Cleans up at the end of each test in which it is used.
36
+ """
37
+ source = self._glob_edge_cases_files(fs, fs_join, fs_path)
38
+ yield source
39
+ fs.rm(source, recursive=True)
40
+
41
+ @pytest.fixture
42
+ def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path):
43
+ """
44
+ Scenario on remote filesystem that is used to check cp/get/put on directory
45
+ and file with the same name prefixes.
46
+
47
+ Cleans up at the end of each test in which it is used.
48
+ """
49
+ source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path)
50
+ yield source
51
+ fs.rm(source, recursive=True)
52
+
53
+ @pytest.fixture
54
+ def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path):
55
+ """
56
+ Scenario on remote filesystem that is used to check cp/get/put files order
57
+ when source and destination are lists.
58
+
59
+ Cleans up at the end of each test in which it is used.
60
+ """
61
+ source = self._10_files_with_hashed_names(fs, fs_join, fs_path)
62
+ yield source
63
+ fs.rm(source, recursive=True)
64
+
65
+ @pytest.fixture
66
+ def fs_target(self, fs, fs_join, fs_path):
67
+ """
68
+ Return name of remote directory that does not yet exist to copy into.
69
+
70
+ Cleans up at the end of each test in which it is used.
71
+ """
72
+ target = fs_join(fs_path, "target")
73
+ yield target
74
+ if fs.exists(target):
75
+ fs.rm(target, recursive=True)
76
+
77
+ @pytest.fixture
78
+ def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path):
79
+ """
80
+ Scenario on local filesystem that is used for many cp/get/put tests.
81
+
82
+ Cleans up at the end of each test in which it is used.
83
+ """
84
+ source = self._bulk_operations_scenario_0(local_fs, local_join, local_path)
85
+ yield source
86
+ local_fs.rm(source, recursive=True)
87
+
88
+ @pytest.fixture
89
+ def local_glob_edge_cases_files(self, local_fs, local_join, local_path):
90
+ """
91
+ Scenario on local filesystem that is used for glob edge cases cp/get/put tests.
92
+
93
+ Cleans up at the end of each test in which it is used.
94
+ """
95
+ source = self._glob_edge_cases_files(local_fs, local_join, local_path)
96
+ yield source
97
+ local_fs.rm(source, recursive=True)
98
+
99
+ @pytest.fixture
100
+ def local_dir_and_file_with_same_name_prefix(
101
+ self, local_fs, local_join, local_path
102
+ ):
103
+ """
104
+ Scenario on local filesystem that is used to check cp/get/put on directory
105
+ and file with the same name prefixes.
106
+
107
+ Cleans up at the end of each test in which it is used.
108
+ """
109
+ source = self._dir_and_file_with_same_name_prefix(
110
+ local_fs, local_join, local_path
111
+ )
112
+ yield source
113
+ local_fs.rm(source, recursive=True)
114
+
115
+ @pytest.fixture
116
+ def local_10_files_with_hashed_names(self, local_fs, local_join, local_path):
117
+ """
118
+ Scenario on local filesystem that is used to check cp/get/put files order
119
+ when source and destination are lists.
120
+
121
+ Cleans up at the end of each test in which it is used.
122
+ """
123
+ source = self._10_files_with_hashed_names(local_fs, local_join, local_path)
124
+ yield source
125
+ local_fs.rm(source, recursive=True)
126
+
127
+ @pytest.fixture
128
+ def local_target(self, local_fs, local_join, local_path):
129
+ """
130
+ Return name of local directory that does not yet exist to copy into.
131
+
132
+ Cleans up at the end of each test in which it is used.
133
+ """
134
+ target = local_join(local_path, "target")
135
+ yield target
136
+ if local_fs.exists(target):
137
+ local_fs.rm(target, recursive=True)
138
+
139
+ def _glob_edge_cases_files(self, some_fs, some_join, some_path):
140
+ """
141
+ Scenario that is used for glob edge cases cp/get/put tests.
142
+ Creates the following directory and file structure:
143
+
144
+ πŸ“ source
145
+ β”œβ”€β”€ πŸ“„ file1
146
+ β”œβ”€β”€ πŸ“„ file2
147
+ β”œβ”€β”€ πŸ“ subdir0
148
+ β”‚ β”œβ”€β”€ πŸ“„ subfile1
149
+ β”‚ β”œβ”€β”€ πŸ“„ subfile2
150
+ β”‚ └── πŸ“ nesteddir
151
+ β”‚ └── πŸ“„ nestedfile
152
+ └── πŸ“ subdir1
153
+ β”œβ”€β”€ πŸ“„ subfile1
154
+ β”œβ”€β”€ πŸ“„ subfile2
155
+ └── πŸ“ nesteddir
156
+ └── πŸ“„ nestedfile
157
+ """
158
+ source = some_join(some_path, "source")
159
+ some_fs.touch(some_join(source, "file1"))
160
+ some_fs.touch(some_join(source, "file2"))
161
+
162
+ for subdir_idx in range(2):
163
+ subdir = some_join(source, f"subdir{subdir_idx}")
164
+ nesteddir = some_join(subdir, "nesteddir")
165
+ some_fs.makedirs(nesteddir)
166
+ some_fs.touch(some_join(subdir, "subfile1"))
167
+ some_fs.touch(some_join(subdir, "subfile2"))
168
+ some_fs.touch(some_join(nesteddir, "nestedfile"))
169
+
170
+ return source
171
+
172
+ def _bulk_operations_scenario_0(self, some_fs, some_join, some_path):
173
+ """
174
+ Scenario that is used for many cp/get/put tests. Creates the following
175
+ directory and file structure:
176
+
177
+ πŸ“ source
178
+ β”œβ”€β”€ πŸ“„ file1
179
+ β”œβ”€β”€ πŸ“„ file2
180
+ └── πŸ“ subdir
181
+ β”œβ”€β”€ πŸ“„ subfile1
182
+ β”œβ”€β”€ πŸ“„ subfile2
183
+ └── πŸ“ nesteddir
184
+ └── πŸ“„ nestedfile
185
+ """
186
+ source = some_join(some_path, "source")
187
+ subdir = some_join(source, "subdir")
188
+ nesteddir = some_join(subdir, "nesteddir")
189
+ some_fs.makedirs(nesteddir)
190
+ some_fs.touch(some_join(source, "file1"))
191
+ some_fs.touch(some_join(source, "file2"))
192
+ some_fs.touch(some_join(subdir, "subfile1"))
193
+ some_fs.touch(some_join(subdir, "subfile2"))
194
+ some_fs.touch(some_join(nesteddir, "nestedfile"))
195
+ return source
196
+
197
+ def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path):
198
+ """
199
+ Scenario that is used to check cp/get/put on directory and file with
200
+ the same name prefixes. Creates the following directory and file structure:
201
+
202
+ πŸ“ source
203
+ β”œβ”€β”€ πŸ“„ subdir.txt
204
+ └── πŸ“ subdir
205
+ └── πŸ“„ subfile.txt
206
+ """
207
+ source = some_join(some_path, "source")
208
+ subdir = some_join(source, "subdir")
209
+ file = some_join(source, "subdir.txt")
210
+ subfile = some_join(subdir, "subfile.txt")
211
+ some_fs.makedirs(subdir)
212
+ some_fs.touch(file)
213
+ some_fs.touch(subfile)
214
+ return source
215
+
216
+ def _10_files_with_hashed_names(self, some_fs, some_join, some_path):
217
+ """
218
+ Scenario that is used to check cp/get/put files order when source and
219
+ destination are lists. Creates the following directory and file structure:
220
+
221
+ πŸ“ source
222
+ └── πŸ“„ {hashed([0-9])}.txt
223
+ """
224
+ source = some_join(some_path, "source")
225
+ for i in range(10):
226
+ hashed_i = md5(str(i).encode("utf-8")).hexdigest()
227
+ path = some_join(source, f"{hashed_i}.txt")
228
+ some_fs.pipe(path=path, value=f"{i}".encode("utf-8"))
229
+ return source
230
+
231
+
232
+ class AbstractFixtures(BaseAbstractFixtures):
233
+ """
234
+ Abstract base class containing fixtures that may be overridden in derived
235
+ filesystem-specific classes to run the abstract tests on such filesystems.
236
+
237
+ For any particular filesystem some of these fixtures must be overridden,
238
+ such as ``fs`` and ``fs_path``, and others may be overridden if the
239
+ default functions here are not appropriate, such as ``fs_join``.
240
+ """
241
+
242
+ @pytest.fixture
243
+ def fs(self):
244
+ raise NotImplementedError("This function must be overridden in derived classes")
245
+
246
+ @pytest.fixture
247
+ def fs_join(self):
248
+ """
249
+ Return a function that joins its arguments together into a path.
250
+
251
+ Most fsspec implementations join paths in a platform-dependent way,
252
+ but some will override this to always use a forward slash.
253
+ """
254
+ return os.path.join
255
+
256
+ @pytest.fixture
257
+ def fs_path(self):
258
+ raise NotImplementedError("This function must be overridden in derived classes")
259
+
260
+ @pytest.fixture(scope="class")
261
+ def local_fs(self):
262
+ # Maybe need an option for auto_mkdir=False? This is only relevant
263
+ # for certain implementations.
264
+ return LocalFileSystem(auto_mkdir=True)
265
+
266
+ @pytest.fixture
267
+ def local_join(self):
268
+ """
269
+ Return a function that joins its arguments together into a path, on
270
+ the local filesystem.
271
+ """
272
+ return os.path.join
273
+
274
+ @pytest.fixture
275
+ def local_path(self, tmpdir):
276
+ return tmpdir
277
+
278
+ @pytest.fixture
279
+ def supports_empty_directories(self):
280
+ """
281
+ Return whether this implementation supports empty directories.
282
+ """
283
+ return True
284
+
285
+ @pytest.fixture
286
+ def fs_sanitize_path(self):
287
+ return lambda x: x
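A minimal sketch of how a concrete filesystem plugs into these abstract tests (not part of the committed file; the class names are hypothetical, and MemoryFileSystem is used only as an illustration): override the required fixtures, then mix the fixture class into one of the imported test classes.

import pytest

import fsspec.tests.abstract as abstract
from fsspec.implementations.memory import MemoryFileSystem


class MemoryFixtures(abstract.AbstractFixtures):
    @pytest.fixture
    def fs(self):
        return MemoryFileSystem()

    @pytest.fixture
    def fs_join(self):
        # memory paths always use forward slashes
        return lambda *parts: "/".join(parts)

    @pytest.fixture
    def fs_path(self):
        return "/abstract-test"


class TestMemoryCopy(abstract.AbstractCopyTests, MemoryFixtures):
    pass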
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc ADDED
Binary file (10.8 kB). View file