applied-ai-018 commited on
Commit
06638a8
·
verified ·
1 Parent(s): c4396cd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/fsspec/asyn.py +1081 -0
  2. env-llmeval/lib/python3.10/site-packages/fsspec/callbacks.py +324 -0
  3. env-llmeval/lib/python3.10/site-packages/fsspec/transaction.py +85 -0
  4. env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA +35 -0
  5. env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL +5 -0
  6. env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt +1 -0
  7. env-llmeval/lib/python3.10/site-packages/responses-0.18.0.dist-info/INSTALLER +1 -0
  8. env-llmeval/lib/python3.10/site-packages/responses-0.18.0.dist-info/top_level.txt +1 -0
  9. env-llmeval/lib/python3.10/site-packages/safetensors/__init__.py +9 -0
  10. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/mlx.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/safetensors/flax.py +138 -0
  17. env-llmeval/lib/python3.10/site-packages/safetensors/paddle.py +138 -0
  18. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/INSTALLER +1 -0
  19. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/LICENSE +153 -0
  20. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/METADATA +307 -0
  21. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD +0 -0
  22. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL +5 -0
  23. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/entry_points.txt +2 -0
  24. env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/top_level.txt +2 -0
  25. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/gui.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/keras.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/notebook.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/rich.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/std.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/utils.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/version.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/tqdm/_main.py +9 -0
  49. env-llmeval/lib/python3.10/site-packages/tqdm/_monitor.py +95 -0
  50. env-llmeval/lib/python3.10/site-packages/tqdm/_tqdm_pandas.py +24 -0
env-llmeval/lib/python3.10/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1081 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import asyncio.events
3
+ import functools
4
+ import inspect
5
+ import io
6
+ import numbers
7
+ import os
8
+ import re
9
+ import threading
10
+ from contextlib import contextmanager
11
+ from glob import has_magic
12
+ from typing import TYPE_CHECKING, Iterable
13
+
14
+ from .callbacks import DEFAULT_CALLBACK
15
+ from .exceptions import FSTimeoutError
16
+ from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
17
+ from .spec import AbstractBufferedFile, AbstractFileSystem
18
+ from .utils import glob_translate, is_exception, other_paths
19
+
20
# Module-level state shared by all non-async filesystem instances.
private = re.compile("_[^_]")  # matches single-underscore ("private") names
iothread = [None]  # holds the dedicated fsspec IO thread, once started
loop = [None]  # holds the global event loop for any non-async instance
_lock = None  # lazily created process-wide lock; see get_lock()
get_running_loop = asyncio.get_running_loop


def get_lock():
    """Return the process-wide threading lock, creating it on first use.

    Lazy allocation means each forked process can be given its own lock
    (after ``reset_lock``) rather than inheriting the parent's.
    """
    global _lock
    if _lock is None:
        _lock = threading.Lock()
    return _lock


def reset_lock():
    """Discard the global lock and loop/thread state.

    Intended to be called only in the child immediately after a fork, so the
    new process allocates fresh primitives instead of reusing the parent's.
    """
    global _lock
    iothread[0] = None
    loop[0] = None
    _lock = None
+
50
+
51
+ async def _runner(event, coro, result, timeout=None):
52
+ timeout = timeout if timeout else None # convert 0 or 0.0 to None
53
+ if timeout is not None:
54
+ coro = asyncio.wait_for(coro, timeout=timeout)
55
+ try:
56
+ result[0] = await coro
57
+ except Exception as ex:
58
+ result[0] = ex
59
+ finally:
60
+ event.set()
61
+
62
+
63
def sync(loop, func, *args, timeout=None, **kwargs):
    """
    Make loop run coroutine until it returns. Runs in other thread

    Examples
    --------
    >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
                         timeout=timeout, **kwargs)
    """
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        # Calling sync() from a thread already inside this loop would deadlock.
        loop0 = asyncio.events.get_running_loop()
        if loop0 is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except NotImplementedError:
        raise
    except RuntimeError:
        # No running loop in this thread — the normal, allowed case.
        pass
    coro = func(*args, **kwargs)
    result = [None]  # single-slot mailbox filled by _runner on the loop thread
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while True:
        # this loops allows thread to get interrupted
        # (a plain event.wait() would block signals such as KeyboardInterrupt)
        if event.wait(1):
            break
        if timeout is not None:
            # Decrement by the 1s poll interval; coarse, but bounds total wait.
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError

    return_result = result[0]
    if isinstance(return_result, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from return_result
    elif isinstance(return_result, BaseException):
        # _runner stored the exception; re-raise it in the calling thread.
        raise return_result
    else:
        return return_result
+
107
+
108
def sync_wrapper(func, obj=None):
    """Wrap async *func* so it can be called from blocking (sync) code.

    Leave ``obj=None`` when decorating inside a class body; pass the instance
    when attaching the wrapper as an attribute of an existing instance.
    """

    @functools.wraps(func)
    def blocking(*args, **kwargs):
        # Bound-method style: the instance is either supplied up front (obj)
        # or arrives as the first positional argument (self).
        instance = obj or args[0]
        return sync(instance.loop, func, *args, **kwargs)

    return blocking
+
122
+
123
+ @contextmanager
124
+ def _selector_policy():
125
+ original_policy = asyncio.get_event_loop_policy()
126
+ try:
127
+ if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
128
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
129
+
130
+ yield
131
+ finally:
132
+ asyncio.set_event_loop_policy(original_policy)
133
+
134
+
135
def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        with get_lock():
            # repeat the check just in case the loop got filled between the
            # previous two calls from another thread
            # (double-checked locking: the first unlocked check is the fast path)
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                # daemon=True so this background loop never blocks interpreter exit
                th.daemon = True
                th.start()
                iothread[0] = th
    return loop[0]
+
153
+
154
# `resource` is POSIX-only; fall back gracefully on Windows.
if TYPE_CHECKING:
    import resource

    ResourceError = resource.error
else:
    try:
        import resource
    except ImportError:
        resource = None
        ResourceError = OSError
    else:
        # resource.error is an alias of OSError on modern Pythons; the getattr
        # guards against builds where the attribute is absent.
        ResourceError = getattr(resource, "error", OSError)

# Default concurrency for batched coroutine execution (see _get_batch_size).
_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280
+
170
+
171
def _get_batch_size(nofiles=False):
    """Pick the default batch size for concurrent coroutine execution.

    Order of precedence: explicit fsspec config key, then the built-in
    default. For operations that open local files the limit is derived from
    the process's open-file soft limit (1/8th of it); -1 means unlimited.
    """
    from fsspec.config import conf

    key = "nofiles_gather_batch_size" if nofiles else "gather_batch_size"
    if key in conf:
        return conf[key]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        # No resource module (e.g. Windows): use the conservative default.
        return _DEFAULT_BATCH_SIZE
    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE
    return -1 if soft_limit == resource.RLIM_INFINITY else soft_limit // 8
+
195
+
196
def running_async() -> bool:
    """Return True when called from code executing inside an event loop."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # get_running_loop raises when no loop is driving this thread.
        return False
    return True
+
204
+
205
async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, then it will not be any throttling. If
        None, it will be inferred from _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """

    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)

    if batch_size == -1:
        # unthrottled: run everything as one batch
        batch_size = len(coros)

    assert batch_size > 0
    results = []
    for start in range(0, len(coros), batch_size):
        # wrap each coroutine with its own per-item timeout before gathering
        chunk = [
            asyncio.Task(asyncio.wait_for(c, timeout=timeout))
            for c in coros[start : start + batch_size]
        ]
        if callback is not DEFAULT_CALLBACK:
            # report progress as tasks finish, not only per batch
            [
                t.add_done_callback(lambda *_, **__: callback.relative_update(1))
                for t in chunk
            ]
        results.extend(
            await asyncio.gather(*chunk, return_exceptions=return_exceptions),
        )
    return results
+
258
+
259
# these methods should be implemented as async by any async-able backend
async_methods = [
    "_ls",
    "_cat_file",
    "_get_file",
    "_put_file",
    "_rm_file",
    "_cp_file",
    "_pipe_file",
    "_expand_path",
    "_info",
    "_isfile",
    "_isdir",
    "_exists",
    "_walk",
    "_glob",
    "_find",
    "_du",
    "_size",
    "_mkdir",
    "_makedirs",
]
+
282
+
283
+ class AsyncFileSystem(AbstractFileSystem):
284
+ """Async file operations, default implementations
285
+
286
+ Passes bulk operations to asyncio.gather for concurrent operation.
287
+
288
+ Implementations that have concurrent batch operations and/or async methods
289
+ should inherit from this class instead of AbstractFileSystem. Docstrings are
290
+ copied from the un-underscored method in AbstractFileSystem, if not given.
291
+ """
292
+
293
+ # note that methods do not have docstring here; they will be copied
294
+ # for _* methods and inferred for overridden methods.
295
+
296
    # Capability flags consumed by the fsspec machinery:
    async_impl = True  # this class provides the underscore-prefixed async API
    mirror_sync_methods = True  # generate blocking wrappers for async methods
    disable_throttling = False  # when True, batch-size throttling is bypassed
+
300
+ def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
301
+ self.asynchronous = asynchronous
302
+ self._pid = os.getpid()
303
+ if not asynchronous:
304
+ self._loop = loop or get_loop()
305
+ else:
306
+ self._loop = None
307
+ self.batch_size = batch_size
308
+ super().__init__(*args, **kwargs)
309
+
310
+ @property
311
+ def loop(self):
312
+ if self._pid != os.getpid():
313
+ raise RuntimeError("This class is not fork-safe")
314
+ return self._loop
315
+
316
    async def _rm_file(self, path, **kwargs):
        """Delete a single file; async backends must override this."""
        raise NotImplementedError
+
319
    async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
        """Delete one or more paths, expanding globs/directories first."""
        # TODO: implement on_error
        batch_size = batch_size or self.batch_size
        path = await self._expand_path(path, recursive=recursive)
        return await _run_coros_in_chunks(
            # reversed so children are removed before their parent directories
            [self._rm_file(p, **kwargs) for p in reversed(path)],
            batch_size=batch_size,
            nofiles=True,
        )
+
329
    async def _cp_file(self, path1, path2, **kwargs):
        """Copy a single file within this filesystem; backends must override."""
        raise NotImplementedError
+
332
    async def _copy(
        self,
        path1,
        path2,
        recursive=False,
        on_error=None,
        maxdepth=None,
        batch_size=None,
        **kwargs,
    ):
        """Copy file(s) within this filesystem, concurrently in batches.

        on_error defaults to "ignore" for recursive copies (skip files that
        vanish mid-copy) and "raise" otherwise.
        """
        if on_error is None and recursive:
            on_error = "ignore"
        elif on_error is None:
            on_error = "raise"

        if isinstance(path1, list) and isinstance(path2, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            paths1 = path1
            paths2 = path2
        else:
            source_is_str = isinstance(path1, str)
            paths1 = await self._expand_path(
                path1, maxdepth=maxdepth, recursive=recursive
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                paths1 = [
                    p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not paths1:
                    return

            source_is_file = len(paths1) == 1
            dest_is_dir = isinstance(path2, str) and (
                trailing_sep(path2) or await self._isdir(path2)
            )

            # "exists" controls whether the source basename is appended to the
            # destination directory when computing target paths.
            exists = source_is_str and (
                (has_magic(path1) and source_is_file)
                or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
            )
            paths2 = other_paths(
                paths1,
                path2,
                exists=exists,
                flatten=not source_is_str,
            )

        batch_size = batch_size or self.batch_size
        coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
        result = await _run_coros_in_chunks(
            coros, batch_size=batch_size, return_exceptions=True, nofiles=True
        )

        for ex in filter(is_exception, result):
            # only FileNotFoundError is ignorable; anything else always raises
            if on_error == "ignore" and isinstance(ex, FileNotFoundError):
                continue
            raise ex
+
392
    async def _pipe_file(self, path, value, **kwargs):
        """Write bytes *value* to *path*; backends must override."""
        raise NotImplementedError
+
395
+ async def _pipe(self, path, value=None, batch_size=None, **kwargs):
396
+ if isinstance(path, str):
397
+ path = {path: value}
398
+ batch_size = batch_size or self.batch_size
399
+ return await _run_coros_in_chunks(
400
+ [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
401
+ batch_size=batch_size,
402
+ nofiles=True,
403
+ )
404
+
405
    async def _process_limits(self, url, start, end):
        """Helper for "Range"-based _cat_file

        Converts (start, end) byte offsets (possibly negative or None) into an
        HTTP Range header value. The file size is fetched lazily, only when a
        negative offset must be resolved.
        """
        size = None
        suff = False  # True => suffix-length form "bytes=-N" (last N bytes)
        if start is not None and start < 0:
            # if start is negative and end None, end is the "suffix length"
            if end is None:
                end = -start
                start = ""
                suff = True
            else:
                size = size or (await self._info(url))["size"]
                start = size + start
        elif start is None:
            start = 0
        if not suff:
            if end is not None and end < 0:
                if start is not None:
                    size = size or (await self._info(url))["size"]
                    end = size + end
            elif end is None:
                # open-ended range: "bytes=start-"
                end = ""
            if isinstance(end, numbers.Integral):
                end -= 1  # bytes range is inclusive
        return f"bytes={start}-{end}"
+
431
    async def _cat_file(self, path, start=None, end=None, **kwargs):
        """Return the bytes of one file (optionally a byte range); backends override."""
        raise NotImplementedError
+
434
    async def _cat(
        self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
    ):
        """Fetch contents of one or more paths.

        Returns bytes for a single literal path, else a dict of path->bytes.
        on_error: "raise" (default), "return" (exceptions kept as values) or
        "omit" (failed paths dropped from the result dict).
        """
        paths = await self._expand_path(path, recursive=recursive)
        coros = [self._cat_file(path, **kwargs) for path in paths]
        batch_size = batch_size or self.batch_size
        out = await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
        if on_error == "raise":
            ex = next(filter(is_exception, out), False)
            if ex:
                raise ex
        if (
            len(paths) > 1
            or isinstance(path, list)
            or paths[0] != self._strip_protocol(path)
        ):
            # input expanded to multiple/changed paths: return a mapping
            return {
                k: v
                for k, v in zip(paths, out)
                if on_error != "omit" or not is_exception(v)
            }
        else:
            # single literal path: return the raw bytes
            return out[0]
+
460
    async def _cat_ranges(
        self,
        paths,
        starts,
        ends,
        max_gap=None,
        batch_size=None,
        on_error="return",
        **kwargs,
    ):
        """Get the contents of byte ranges from one or more files

        Parameters
        ----------
        paths: list
            A list of filepaths on this filesystem
        starts, ends: int or list
            Bytes limits of the read. If using a single int, the same value will be
            used to read all the specified files.
        """
        # TODO: on_error
        if max_gap is not None:
            # use utils.merge_offset_ranges
            raise NotImplementedError
        if not isinstance(paths, list):
            raise TypeError
        if not isinstance(starts, Iterable):
            # scalar start: broadcast to every path
            starts = [starts] * len(paths)
        if not isinstance(ends, Iterable):
            ends = [ends] * len(paths)
        if len(starts) != len(paths) or len(ends) != len(paths):
            raise ValueError
        coros = [
            self._cat_file(p, start=s, end=e, **kwargs)
            for p, s, e in zip(paths, starts, ends)
        ]
        batch_size = batch_size or self.batch_size
        # on_error="return": exceptions come back in-place in the result list
        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
+
501
    async def _put_file(self, lpath, rpath, **kwargs):
        """Upload a single local file to the remote; backends must override."""
        raise NotImplementedError
+
504
    async def _put(
        self,
        lpath,
        rpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        batch_size=None,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) from local.

        Copies a specific file or tree of files (if recursive=True). If rpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within.

        The put_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(lpath, str)
            if source_is_str:
                lpath = make_path_posix(lpath)
            fs = LocalFileSystem()
            lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
                if not lpaths:
                    return

            source_is_file = len(lpaths) == 1
            dest_is_dir = isinstance(rpath, str) and (
                trailing_sep(rpath) or await self._isdir(rpath)
            )

            rpath = self._strip_protocol(rpath)
            # "exists" decides whether the source basename is appended under rpath
            exists = source_is_str and (
                (has_magic(lpath) and source_is_file)
                or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
            )
            rpaths = other_paths(
                lpaths,
                rpath,
                exists=exists,
                flatten=not source_is_str,
            )

        # Partition into directories (created up-front) and real file transfers.
        is_dir = {l: os.path.isdir(l) for l in lpaths}
        rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
        file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]

        await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
        batch_size = batch_size or self.batch_size

        coros = []
        callback.set_size(len(file_pairs))
        for lfile, rfile in file_pairs:
            # branch_coro gives each transfer its own child progress callback
            put_file = callback.branch_coro(self._put_file)
            coros.append(put_file(lfile, rfile, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )
+
578
    async def _get_file(self, rpath, lpath, **kwargs):
        """Download a single remote file to a local path; backends must override."""
        raise NotImplementedError
+
581
    async def _get(
        self,
        rpath,
        lpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) to local.

        Copies a specific file or tree of files (if recursive=True). If lpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within. Can submit a list of paths, which may be glob-patterns
        and will be expanded.

        The get_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(rpath, str)
            # First check for rpath trailing slash as _strip_protocol removes it.
            source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
            rpath = self._strip_protocol(rpath)
            rpaths = await self._expand_path(
                rpath, recursive=recursive, maxdepth=maxdepth
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                rpaths = [
                    p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not rpaths:
                    return

            lpath = make_path_posix(lpath)
            source_is_file = len(rpaths) == 1
            dest_is_dir = isinstance(lpath, str) and (
                trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
            )

            # "exists" decides whether the source basename is appended under lpath
            exists = source_is_str and (
                (has_magic(rpath) and source_is_file)
                or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
            )
            lpaths = other_paths(
                rpaths,
                lpath,
                exists=exists,
                flatten=not source_is_str,
            )

        # Ensure each target's parent directory exists before transferring.
        [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
        batch_size = kwargs.pop("batch_size", self.batch_size)

        coros = []
        callback.set_size(len(lpaths))
        for lpath, rpath in zip(lpaths, rpaths):
            # branch_coro gives each transfer its own child progress callback
            get_file = callback.branch_coro(self._get_file)
            coros.append(get_file(rpath, lpath, **kwargs))
        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )
+
654
+ async def _isfile(self, path):
655
+ try:
656
+ return (await self._info(path))["type"] == "file"
657
+ except: # noqa: E722
658
+ return False
659
+
660
+ async def _isdir(self, path):
661
+ try:
662
+ return (await self._info(path))["type"] == "directory"
663
+ except OSError:
664
+ return False
665
+
666
    async def _size(self, path):
        """Size of *path* in bytes, or None if the backend reports no size."""
        return (await self._info(path)).get("size", None)
+
669
    async def _sizes(self, paths, batch_size=None):
        """Sizes of many paths, fetched concurrently in batches."""
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._size(p) for p in paths], batch_size=batch_size
        )
+
675
+ async def _exists(self, path, **kwargs):
676
+ try:
677
+ await self._info(path, **kwargs)
678
+ return True
679
+ except FileNotFoundError:
680
+ return False
681
+
682
    async def _info(self, path, **kwargs):
        """Return the info dict for *path*; backends must override."""
        raise NotImplementedError
+
685
    async def _ls(self, path, detail=True, **kwargs):
        """List the contents of *path*; backends must override."""
        raise NotImplementedError
+
688
    async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
        """Depth-first traversal yielding (path, dirs, files) per directory.

        on_error: "omit" (yield empty results), "raise", or a callable that
        receives the listing exception.
        """
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        path = self._strip_protocol(path)
        full_dirs = {}  # basename -> full path, for recursing
        dirs = {}
        files = {}

        # detail arrives through kwargs so the sync mirror keeps its signature
        detail = kwargs.pop("detail", False)
        try:
            listing = await self._ls(path, detail=True, **kwargs)
        except (FileNotFoundError, OSError) as e:
            if on_error == "raise":
                raise
            elif callable(on_error):
                on_error(e)
            if detail:
                yield path, {}, {}
            else:
                yield path, [], []
            return

        for info in listing:
            # each info name must be at least [path]/part , but here
            # we check also for names like [path]/part/
            pathname = info["name"].rstrip("/")
            name = pathname.rsplit("/", 1)[-1]
            if info["type"] == "directory" and pathname != path:
                # do not include "self" path
                full_dirs[name] = pathname
                dirs[name] = info
            elif pathname == path:
                # file-like with same name as given path
                files[""] = info
            else:
                files[name] = info

        if detail:
            yield path, dirs, files
        else:
            yield path, list(dirs), list(files)

        if maxdepth is not None:
            maxdepth -= 1
            if maxdepth < 1:
                # depth budget exhausted; do not recurse further
                return

        for d in dirs:
            async for _ in self._walk(
                full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
            ):
                yield _
742
    async def _glob(self, path, maxdepth=None, **kwargs):
        """Find paths matching a glob pattern (``*``, ``?``, ``[...]``, ``**``).

        Returns a list of paths, or a dict path->info when ``detail=True`` is
        passed through kwargs.
        """
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        import re

        seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
        ends_with_sep = path.endswith(seps)  # _strip_protocol strips trailing slash
        path = self._strip_protocol(path)
        append_slash_to_dirname = ends_with_sep or path.endswith(
            tuple(sep + "**" for sep in seps)
        )
        # position of the first glob metacharacter (len(path) if absent)
        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
        idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)

        min_idx = min(idx_star, idx_qmark, idx_brace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            # no wildcards: behaves like exists()/info()
            if await self._exists(path, **kwargs):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path, **kwargs)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:min_idx]:
            # root = longest literal directory prefix before the first wildcard
            min_idx = path[:min_idx].rindex("/")
            root = path[: min_idx + 1]
            depth = path[min_idx + 1 :].count("/") + 1
        else:
            root = ""
            depth = path[min_idx + 1 :].count("/") + 1

        if "**" in path:
            if maxdepth is not None:
                idx_double_stars = path.find("**")
                depth_double_stars = path[idx_double_stars:].count("/") + 1
                # ``**`` absorbs maxdepth levels in place of its own segment count
                depth = depth - depth_double_stars + maxdepth
            else:
                depth = None  # unbounded recursion

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )

        pattern = glob_translate(path + ("/" if ends_with_sep else ""))
        pattern = re.compile(pattern)

        out = {
            p: info
            for p, info in sorted(allpaths.items())
            if pattern.match(
                (
                    p + "/"
                    if append_slash_to_dirname and info["type"] == "directory"
                    else p
                )
            )
        }

        if detail:
            return out
        else:
            return list(out)
812
+
813
+ async def _du(self, path, total=True, maxdepth=None, **kwargs):
814
+ sizes = {}
815
+ # async for?
816
+ for f in await self._find(path, maxdepth=maxdepth, **kwargs):
817
+ info = await self._info(f)
818
+ sizes[info["name"]] = info["size"]
819
+ if total:
820
+ return sum(sizes.values())
821
+ else:
822
+ return sizes
823
+
824
    async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
        """List all files below ``path``, recursively.

        Parameters
        ----------
        maxdepth: int or None
            Passed to ``_walk`` to limit recursion depth.
        withdirs: bool
            Whether to include directory entries in the output.
        kwargs:
            ``detail=True`` returns ``{name: info}`` instead of a name list.
        """
        path = self._strip_protocol(path)
        out = {}
        detail = kwargs.pop("detail", False)

        # Add the root directory if withdirs is requested
        # This is needed for posix glob compliance
        if withdirs and path != "" and await self._isdir(path):
            out[path] = await self._info(path)

        # async for?
        async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
            if withdirs:
                files.update(dirs)
            out.update({info["name"]: info for name, info in files.items()})
        if not out and (await self._isfile(path)):
            # walk works on directories, but find should also return [path]
            # when path happens to be a file
            out[path] = {}
        names = sorted(out)
        if not detail:
            return names
        else:
            return {name: out[name] for name in names}
    async def _expand_path(self, path, recursive=False, maxdepth=None):
        """Expand glob(s)/directories into a flat, sorted list of paths.

        Parameters
        ----------
        path: str or list of str
            Path(s), possibly containing glob characters.
        recursive: bool
            Also expand directories to all of their contents.
        maxdepth: int or None
            Limit of recursion depth; must be at least 1 when given.

        Raises
        ------
        FileNotFoundError
            If nothing at all matched.
        """
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        if isinstance(path, str):
            # normalise the single-string case to the list case
            out = await self._expand_path([path], recursive, maxdepth)
        else:
            out = set()
            path = [self._strip_protocol(p) for p in path]
            for p in path:  # can gather here
                if has_magic(p):
                    bit = set(await self._glob(p, maxdepth=maxdepth))
                    out |= bit
                    if recursive:
                        # glob call above expanded one depth so if maxdepth is defined
                        # then decrement it in expand_path call below. If it is zero
                        # after decrementing then avoid expand_path call.
                        if maxdepth is not None and maxdepth <= 1:
                            continue
                        out |= set(
                            await self._expand_path(
                                list(bit),
                                recursive=recursive,
                                maxdepth=maxdepth - 1 if maxdepth is not None else None,
                            )
                        )
                    continue
                elif recursive:
                    rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
                    out |= rec
                if p not in out and (recursive is False or (await self._exists(p))):
                    # should only check once, for the root
                    out.add(p)
        if not out:
            raise FileNotFoundError(path)
        return sorted(out)
    async def _mkdir(self, path, create_parents=True, **kwargs):
        # Intentionally a no-op by default: not all backends have real
        # directories; subclasses override where mkdir is meaningful.
        pass  # not necessary to implement, may not have directories
    async def _makedirs(self, path, exist_ok=False):
        # Intentionally a no-op by default: not all backends have real
        # directories; subclasses override where makedirs is meaningful.
        pass  # not necessary to implement, may not have directories
+ async def open_async(self, path, mode="rb", **kwargs):
893
+ if "b" not in mode or kwargs.get("compression"):
894
+ raise ValueError
895
+ raise NotImplementedError
896
+
897
+
898
def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For each method will create a sync version if the name refers to an async method
    (coroutine) and there is no override in the child class; will create an async
    method for the corresponding sync method if there is no implementation.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    for method in async_methods + dir(AsyncFileSystem):
        if not method.startswith("_"):
            # only underscore-prefixed coroutines (e.g. ``_cat``) are mirrored
            continue
        smethod = method[1:]
        if private.match(method):
            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
            # only install a sync wrapper when the subclass did not already
            # override the sync method itself
            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
            if isco and is_default:
                mth = sync_wrapper(getattr(obj, method), obj=obj)
                setattr(obj, smethod, mth)
                if not mth.__doc__:
                    # inherit documentation from the sync implementation
                    mth.__doc__ = getattr(
                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
                    )
class FSSpecCoroutineCancel(Exception):
    """Exception thrown into running coroutines by ``_dump_running_tasks``
    to force their cancellation."""

    pass
def _dump_running_tasks(
    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
    """Debug helper: inspect (and optionally cancel) unfinished tasks on the
    dedicated fsspec event loop (``loop[0]``).

    Parameters
    ----------
    printout: bool
        Print each task's stack.
    cancel: bool
        Forcibly cancel every unfinished task, throwing ``exc`` into it.
    exc: Exception class
        Exception used to terminate the coroutines.
    with_task: bool
        Include the task object itself in the returned records.
    """
    import traceback

    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
    if printout:
        [task.print_stack() for task in tasks]
    # NOTE: reaches into Task/coroutine internals (``_coro``, ``cr_frame``) —
    # debug-only and CPython-implementation specific.
    out = [
        {
            "locals": task._coro.cr_frame.f_locals,
            "file": task._coro.cr_frame.f_code.co_filename,
            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
            "linelo": task._coro.cr_frame.f_lineno,
            "stack": traceback.format_stack(task._coro.cr_frame),
            "task": task if with_task else None,
        }
        for task in tasks
    ]
    if cancel:
        for t in tasks:
            cbs = t._callbacks
            t.cancel()
            asyncio.futures.Future.set_exception(t, exc)
            asyncio.futures.Future.cancel(t)
            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
            try:
                t._coro.throw(exc)  # exits coro, unless explicitly handled
            except exc:
                pass
    return out
class AbstractAsyncStreamedFile(AbstractBufferedFile):
    """Async counterpart of ``AbstractBufferedFile``: streamed reads and
    buffered, chunked uploads, usable with ``async with``."""

    # no read buffering, and always auto-commit
    # TODO: readahead might still be useful here, but needs async version

    async def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if length == 0:
            # don't even bother calling fetch
            return b""
        out = await self._fetch_range(self.loc, self.loc + length)
        self.loc += len(out)
        return out

    async def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        out = self.buffer.write(data)
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            # buffer full: push a chunk upstream
            await self.flush()
        return out

    async def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                # final flush uploads any remaining buffered data
                await self.flush(force=True)

            if self.fs is not None:
                # listing caches are stale once the file exists remotely
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    async def flush(self, force=False):
        # ``force=True`` marks the upload as final; it may only happen once.
        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # no-op to flush on read-mode
            return

        if not force and self.buffer.tell() < self.blocksize:
            # Defer write on small block
            return

        if self.offset is None:
            # Initialize a multipart upload
            self.offset = 0
            try:
                await self._initiate_upload()
            except:  # noqa: E722
                self.closed = True
                raise

        if await self._upload_chunk(final=force) is not False:
            # chunk accepted: advance offset and start a fresh buffer
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _fetch_range(self, start, end):
        # must be provided by the concrete backend
        raise NotImplementedError

    async def _initiate_upload(self):
        # optional hook run once before the first chunk upload
        pass

    async def _upload_chunk(self, final=False):
        # must be provided by the concrete backend
        raise NotImplementedError
env-llmeval/lib/python3.10/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import wraps
2
+
3
+
4
class Callback:
    """
    Base class and interface for callback mechanism

    This class can be used directly for monitoring file transfers by
    providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
    below), or subclassed for more specialised behaviour.

    Parameters
    ----------
    size: int (optional)
        Nominal quantity for the value that corresponds to a complete
        transfer, e.g., total number of tiles or total number of
        bytes
    value: int (0)
        Starting internal counter value
    hooks: dict or None
        A dict of named functions to be called on each update. The signature
        of these must be ``f(size, value, **kwargs)``
    """

    def __init__(self, size=None, value=0, hooks=None, **kwargs):
        self.size = size
        self.value = value
        self.hooks = hooks or {}
        self.kw = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *exc_args):
        self.close()

    def close(self):
        """Close callback."""

    def branched(self, path_1, path_2, **kwargs):
        """
        Return callback for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The function returns a callback
        that has to be passed to the child method, e.g., put_file,
        as `callback=` argument.

        The implementation uses `callback.branch` for compatibility.
        When implementing callbacks, it is recommended to override this function instead
        of `branch` and avoid calling `super().branched(...)`.

        Prefer using this function over `branch`.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        **kwargs:
            Arbitrary keyword arguments

        Returns
        -------
        callback: Callback
            A callback instance to be passed to the child method
        """
        self.branch(path_1, path_2, kwargs)
        # mutate kwargs so that we can force the caller to pass "callback=" explicitly
        return kwargs.pop("callback", DEFAULT_CALLBACK)

    def branch_coro(self, fn):
        """
        Wraps a coroutine, and pass a new child callback to it.
        """

        @wraps(fn)
        async def func(path1, path2: str, **kwargs):
            with self.branched(path1, path2, **kwargs) as child:
                return await fn(path1, path2, callback=child, **kwargs)

        return func

    def set_size(self, size):
        """
        Set the internal maximum size attribute

        Usually called if not initially set at instantiation. Note that this
        triggers a ``call()``.

        Parameters
        ----------
        size: int
        """
        self.size = size
        self.call()

    def absolute_update(self, value):
        """
        Set the internal value state

        Triggers ``call()``

        Parameters
        ----------
        value: int
        """
        self.value = value
        self.call()

    def relative_update(self, inc=1):
        """
        Delta increment the internal counter

        Triggers ``call()``

        Parameters
        ----------
        inc: int
        """
        self.value += inc
        self.call()

    def call(self, hook_name=None, **kwargs):
        """
        Execute hook(s) with current state

        Each function is passed the internal size and current value

        Parameters
        ----------
        hook_name: str or None
            If given, execute on this hook
        kwargs: passed on to (all) hook(s)
        """
        if not self.hooks:
            return
        kw = self.kw.copy()
        kw.update(kwargs)
        if hook_name:
            # single named hook: return its result directly
            if hook_name not in self.hooks:
                return
            return self.hooks[hook_name](self.size, self.value, **kw)
        for hook in self.hooks.values() or []:
            hook(self.size, self.value, **kw)

    def wrap(self, iterable):
        """
        Wrap an iterable to call ``relative_update`` on each iterations

        Parameters
        ----------
        iterable: Iterable
            The iterable that is being wrapped
        """
        for item in iterable:
            self.relative_update()
            yield item

    def branch(self, path_1, path_2, kwargs):
        """
        Set callbacks for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The passed kwargs are
        to be *mutated* to add ``callback=``, if this class supports branching
        to children.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        kwargs: dict
            arguments passed to child method, e.g., put_file.

        Returns
        -------

        """
        return None

    def no_op(self, *_, **__):
        # accepts and ignores anything; used as the fallback for __getattr__
        pass

    def __getattr__(self, item):
        """
        If undefined methods are called on this class, nothing happens
        """
        return self.no_op

    @classmethod
    def as_callback(cls, maybe_callback=None):
        """Transform callback=... into Callback instance

        For the special value of ``None``, return the global instance of
        ``NoOpCallback``. This is an alternative to including
        ``callback=DEFAULT_CALLBACK`` directly in a method signature.
        """
        if maybe_callback is None:
            return DEFAULT_CALLBACK
        return maybe_callback
+
207
class NoOpCallback(Callback):
    """Callback implementation that ignores every event."""

    def call(self, *args, **kwargs):
        # Deliberately swallow all hook invocations.
        return None
216
class DotPrinterCallback(Callback):
    """Minimal demonstration callback that prints one character per event.

    The top-level transfer prints ``#`` while branched (child) transfers
    print ``.``, showing how parent and child callbacks can differ.
    """

    def __init__(self, chr_to_print="#", **kwargs):
        self.chr = chr_to_print
        super().__init__(**kwargs)

    def branch(self, path_1, path_2, kwargs):
        """Install a child callback (printing ".") into the child's kwargs."""
        kwargs["callback"] = DotPrinterCallback(".")

    def call(self, **kwargs):
        """Emit this callback's character without a newline."""
        print(self.chr, end="")
+
237
class TqdmCallback(Callback):
    """
    A callback to display a progress bar using tqdm

    Parameters
    ----------
    tqdm_kwargs : dict, (optional)
        Any argument accepted by the tqdm constructor.
        See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
        Will be forwarded to `tqdm_cls`.
    tqdm_cls: (optional)
        subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.

    Examples
    --------
    >>> import fsspec
    >>> from fsspec.callbacks import TqdmCallback
    >>> fs = fsspec.filesystem("memory")
    >>> path2distant_data = "/your-path"
    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(),
        )

    You can forward args to tqdm using the ``tqdm_kwargs`` parameter.

    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
        )

    You can also customize the progress bar by passing a subclass of `tqdm`.

    .. code-block:: python

        class TqdmFormat(tqdm):
            '''Provides a `total_time` format parameter'''
            @property
            def format_dict(self):
                d = super().format_dict
                total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
                d.update(total_time=self.format_interval(total_time) + " in total")
                return d

    >>> with TqdmCallback(
            tqdm_kwargs={
                "desc": "desc",
                "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
            },
            tqdm_cls=TqdmFormat,
        ) as callback:
            fs.upload(".", path2distant_data, recursive=True, callback=callback)
    """

    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
        # Import lazily so tqdm is only required when this callback is used.
        try:
            from tqdm import tqdm

        except ImportError as exce:
            raise ImportError(
                "Using TqdmCallback requires tqdm to be installed"
            ) from exce

        self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
        self._tqdm_kwargs = tqdm_kwargs or {}
        # The bar is created lazily on the first call(), once size is known.
        self.tqdm = None
        super().__init__(*args, **kwargs)

    def call(self, *args, **kwargs):
        if self.tqdm is None:
            self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
        # size may have been set after the bar was created; keep it in sync
        self.tqdm.total = self.size
        self.tqdm.update(self.value - self.tqdm.n)

    def close(self):
        if self.tqdm is not None:
            self.tqdm.close()
            self.tqdm = None

    def __del__(self):
        # best-effort cleanup if the callback is garbage-collected unclosed
        return self.close()
+
324
# Module-level singleton used whenever no callback is supplied.
# ``_DEFAULT_CALLBACK`` is kept as an alias for backward compatibility.
DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
env-llmeval/lib/python3.10/site-packages/fsspec/transaction.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import deque
2
+
3
+
4
class Transaction:
    """Filesystem transaction write context

    Collects deferred files so that several writes can be committed or
    discarded together, semi-atomically.  While active, this instance is
    exposed as the ``.transaction`` attribute of the given filesystem.
    """

    def __init__(self, fs):
        """
        Parameters
        ----------
        fs: FileSystem instance
        """
        self.fs = fs
        self.files = deque()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """End transaction and commit, if exit is not due to exception"""
        # commit only when the block exited cleanly
        self.complete(commit=exc_type is None)
        self.fs._intrans = False
        self.fs._transaction = None

    def start(self):
        """Start a transaction on this FileSystem"""
        # drop anything left over from a previously failed completion
        self.files = deque()
        self.fs._intrans = True

    def complete(self, commit=True):
        """Finish transaction: commit or discard all deferred files"""
        while self.files:
            pending = self.files.popleft()
            (pending.commit if commit else pending.discard)()
        self.fs._intrans = False
+
48
class FileActor:
    """Holds deferred files (e.g. on a dask worker) for group commit/discard."""

    def __init__(self):
        self.files = []

    def commit(self):
        """Commit every held file, then forget them."""
        for pending in self.files:
            pending.commit()
        self.files.clear()

    def discard(self):
        """Discard every held file, then forget them."""
        for pending in self.files:
            pending.discard()
        self.files.clear()

    def append(self, f):
        """Register a file for later commit or discard."""
        self.files.append(f)
+
66
class DaskTransaction(Transaction):
    # Variant of Transaction whose deferred files live in a distributed
    # actor on a dask worker, rather than in a local deque.

    def __init__(self, fs):
        """
        Parameters
        ----------
        fs: FileSystem instance
        """
        import distributed

        super().__init__(fs)
        client = distributed.default_client()
        # ``self.files`` is a remote FileActor; method calls return futures
        self.files = client.submit(FileActor, actor=True).result()

    def complete(self, commit=True):
        """Finish transaction: commit or discard all deferred files"""
        if commit:
            self.files.commit().result()
        else:
            self.files.discard().result()
        self.fs._intrans = False
env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-nvtx-cu12
3
+ Version: 12.1.105
4
+ Summary: NVIDIA Tools Extension
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: [email protected]
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+
35
+ A C-based API for annotating events, code ranges, and resources in your applications. Applications which integrate NVTX can use the Visual Profiler to capture and visualize these events and ranges.
env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux1_x86_64
5
+
env-llmeval/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
env-llmeval/lib/python3.10/site-packages/responses-0.18.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
env-llmeval/lib/python3.10/site-packages/responses-0.18.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ responses
env-llmeval/lib/python3.10/site-packages/safetensors/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Re-export this
2
+ from ._safetensors_rust import ( # noqa: F401
3
+ SafetensorError,
4
+ __version__,
5
+ deserialize,
6
+ safe_open,
7
+ serialize,
8
+ serialize_file,
9
+ )
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc ADDED
Binary file (4.29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/mlx.cpython-310.pyc ADDED
Binary file (4.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc ADDED
Binary file (5.45 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc ADDED
Binary file (4.53 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/safetensors/flax.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import jax.numpy as jnp
7
+ from jax import Array
8
+ from safetensors import numpy, safe_open
9
+
10
+
11
def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """Serialize a dict of jax arrays into safetensors-format bytes.

    Args:
        tensors (`Dict[str, Array]`):
            Mapping of names to contiguous, dense jax arrays.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form, text-only header metadata; purely informative and
            does not affect loading.

    Returns:
        `bytes`: The raw safetensors payload.

    Example:

    ```python
    from safetensors.flax import save
    from jax import numpy as jnp

    tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Convert to numpy arrays, then delegate to the numpy backend.
    return numpy.save(_jnp2np(tensors), metadata=metadata)
+
40
def save_file(
    tensors: Dict[str, Array],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
) -> None:
    """Write a dict of jax arrays to *filename* in safetensors format.

    Args:
        tensors (`Dict[str, Array]`):
            Mapping of names to contiguous, dense jax arrays.
        filename (`str`, or `os.PathLike`):
            Destination file path.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form, text-only header metadata; purely informative and
            does not affect loading.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.flax import save_file
    from jax import numpy as jnp

    tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Convert to numpy arrays, then delegate to the numpy backend.
    return numpy.save_file(_jnp2np(tensors), filename, metadata=metadata)
+
75
def load(data: bytes) -> Dict[str, Array]:
    """Deserialize safetensors bytes into a dict of jax arrays on CPU.

    Args:
        data (`bytes`):
            The content of a safetensors file.

    Returns:
        `Dict[str, Array]`: name -> jax array mapping.

    Example:

    ```python
    from safetensors.flax import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Decode with the numpy backend, then convert values to jax arrays.
    return _np2jnp(numpy.load(data))
101
+
102
def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]:
    """Load a safetensors file into a dict of jax arrays.

    Args:
        filename (`str`, or `os.PathLike`):
            Path of the safetensors file to read.

    Returns:
        `Dict[str, Array]`: name -> jax array mapping.

    Example:

    ```python
    from safetensors.flax import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    with safe_open(filename, framework="flax") as handle:
        return {name: handle.get_tensor(name) for name in handle.keys()}
128
+
129
+ def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]:
130
+ for k, v in numpy_dict.items():
131
+ numpy_dict[k] = jnp.array(v)
132
+ return numpy_dict
133
+
134
+
135
+ def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]:
136
+ for k, v in jnp_dict.items():
137
+ jnp_dict[k] = np.asarray(v)
138
+ return jnp_dict
env-llmeval/lib/python3.10/site-packages/safetensors/paddle.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import paddle
7
+ from safetensors import numpy
8
+
9
+
10
def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """Serialize a dict of paddle tensors into safetensors-format bytes.

    Args:
        tensors (`Dict[str, paddle.Tensor]`):
            Mapping of names to contiguous, dense paddle tensors.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form, text-only header metadata; purely informative and
            does not affect loading.

    Returns:
        `bytes`: The raw safetensors payload.

    Example:

    ```python
    from safetensors.paddle import save
    import paddle

    tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Convert to numpy arrays, then delegate to the numpy backend.
    return numpy.save(_paddle2np(tensors), metadata=metadata)
+
39
def save_file(
    tensors: Dict[str, paddle.Tensor],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
) -> None:
    """Write a dict of paddle tensors to *filename* in safetensors format.

    Args:
        tensors (`Dict[str, paddle.Tensor]`):
            Mapping of names to contiguous, dense paddle tensors.
        filename (`str`, or `os.PathLike`):
            Destination file path.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form, text-only header metadata; purely informative and
            does not affect loading.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.paddle import save_file
    import paddle

    tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Convert to numpy arrays, then delegate to the numpy backend.
    return numpy.save_file(_paddle2np(tensors), filename, metadata=metadata)
73
+
74
def load(data: bytes, device: str = "cpu") -> Dict[str, paddle.Tensor]:
    """Deserialize safetensors bytes into a dict of paddle tensors.

    Args:
        data (`bytes`):
            The content of a safetensors file.
        device (`str`, *optional*, defaults to `"cpu"`):
            Device to place the resulting tensors on.

    Returns:
        `Dict[str, paddle.Tensor]`: name -> paddle tensor mapping.

    Example:

    ```python
    from safetensors.paddle import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Decode with the numpy backend, then convert values to paddle tensors.
    return _np2paddle(numpy.load(data), device)
+
101
def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, paddle.Tensor]:
    """Load a safetensors file into a dict of paddle tensors.

    Args:
        filename (`str`, or `os.PathLike`):
            Path of the safetensors file to read.
        device (`str`, *optional*, defaults to `"cpu"`):
            Device to place the resulting tensors on; accepts regular
            paddle device locations.

    Returns:
        `Dict[str, paddle.Tensor]`: name -> paddle tensor mapping.

    Example:

    ```python
    from safetensors.paddle import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    # Read with the numpy backend, then convert values to paddle tensors.
    return _np2paddle(numpy.load_file(filename), device)
128
+
129
def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str = "cpu") -> Dict[str, paddle.Tensor]:
    """Convert numpy values to paddle tensors on *device*, mutating in place."""
    for name in numpy_dict:
        numpy_dict[name] = paddle.to_tensor(numpy_dict[name], place=device)
    return numpy_dict
134
+
135
def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]:
    """Convert paddle values to numpy arrays, mutating the dict in place."""
    for name in paddle_dict:
        paddle_dict[name] = paddle_dict[name].detach().cpu().numpy()
    return paddle_dict
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/LICENSE ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2006-2023 SymPy Development Team
2
+
3
+ All rights reserved.
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions are met:
7
+
8
+ a. Redistributions of source code must retain the above copyright notice,
9
+ this list of conditions and the following disclaimer.
10
+ b. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+ c. Neither the name of SymPy nor the names of its contributors
14
+ may be used to endorse or promote products derived from this software
15
+ without specific prior written permission.
16
+
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
22
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28
+ DAMAGE.
29
+
30
+ --------------------------------------------------------------------------------
31
+
32
+ Patches that were taken from the Diofant project (https://github.com/diofant/diofant)
33
+ are licensed as:
34
+
35
+ Copyright (c) 2006-2018 SymPy Development Team,
36
+ 2013-2023 Sergey B Kirpichev
37
+
38
+ All rights reserved.
39
+
40
+ Redistribution and use in source and binary forms, with or without
41
+ modification, are permitted provided that the following conditions are met:
42
+
43
+ a. Redistributions of source code must retain the above copyright notice,
44
+ this list of conditions and the following disclaimer.
45
+ b. Redistributions in binary form must reproduce the above copyright
46
+ notice, this list of conditions and the following disclaimer in the
47
+ documentation and/or other materials provided with the distribution.
48
+ c. Neither the name of Diofant or SymPy nor the names of its contributors
49
+ may be used to endorse or promote products derived from this software
50
+ without specific prior written permission.
51
+
52
+
53
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
54
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56
+ ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
57
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
60
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
63
+ DAMAGE.
64
+
65
+ --------------------------------------------------------------------------------
66
+
67
+ Submodules taken from the multipledispatch project (https://github.com/mrocklin/multipledispatch)
68
+ are licensed as:
69
+
70
+ Copyright (c) 2014 Matthew Rocklin
71
+
72
+ All rights reserved.
73
+
74
+ Redistribution and use in source and binary forms, with or without
75
+ modification, are permitted provided that the following conditions are met:
76
+
77
+ a. Redistributions of source code must retain the above copyright notice,
78
+ this list of conditions and the following disclaimer.
79
+ b. Redistributions in binary form must reproduce the above copyright
80
+ notice, this list of conditions and the following disclaimer in the
81
+ documentation and/or other materials provided with the distribution.
82
+ c. Neither the name of multipledispatch nor the names of its contributors
83
+ may be used to endorse or promote products derived from this software
84
+ without specific prior written permission.
85
+
86
+
87
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
88
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
89
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
90
+ ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
91
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
92
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
93
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
94
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
95
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
96
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
97
+ DAMAGE.
98
+
99
+ --------------------------------------------------------------------------------
100
+
101
+ The files under the directory sympy/parsing/autolev/tests/pydy-example-repo
102
+ are directly copied from PyDy project and are licensed as:
103
+
104
+ Copyright (c) 2009-2023, PyDy Authors
105
+ All rights reserved.
106
+
107
+ Redistribution and use in source and binary forms, with or without
108
+ modification, are permitted provided that the following conditions are met:
109
+
110
+ * Redistributions of source code must retain the above copyright
111
+ notice, this list of conditions and the following disclaimer.
112
+ * Redistributions in binary form must reproduce the above copyright
113
+ notice, this list of conditions and the following disclaimer in the
114
+ documentation and/or other materials provided with the distribution.
115
+ * Neither the name of this project nor the names of its contributors may be
116
+ used to endorse or promote products derived from this software without
117
+ specific prior written permission.
118
+
119
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
120
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
121
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
122
+ DISCLAIMED. IN NO EVENT SHALL PYDY AUTHORS BE LIABLE FOR ANY DIRECT,
123
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
124
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
125
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
126
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
127
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
128
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
129
+
130
+ --------------------------------------------------------------------------------
131
+
132
+ The files under the directory sympy/parsing/latex
133
+ are directly copied from latex2sympy project and are licensed as:
134
+
135
+ Copyright 2016, latex2sympy
136
+
137
+ Permission is hereby granted, free of charge, to any person obtaining a copy
138
+ of this software and associated documentation files (the "Software"), to deal
139
+ in the Software without restriction, including without limitation the rights
140
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
141
+ copies of the Software, and to permit persons to whom the Software is
142
+ furnished to do so, subject to the following conditions:
143
+
144
+ The above copyright notice and this permission notice shall be included in all
145
+ copies or substantial portions of the Software.
146
+
147
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
148
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
149
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
150
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
151
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
152
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
153
+ SOFTWARE.
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/METADATA ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: sympy
3
+ Version: 1.12
4
+ Summary: Computer algebra system (CAS) in Python
5
+ Home-page: https://sympy.org
6
+ Author: SymPy development team
7
+ Author-email: [email protected]
8
+ License: BSD
9
+ Project-URL: Source, https://github.com/sympy/sympy
10
+ Keywords: Math CAS
11
+ Classifier: License :: OSI Approved :: BSD License
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Topic :: Scientific/Engineering
15
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
16
+ Classifier: Topic :: Scientific/Engineering :: Physics
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3 :: Only
23
+ Classifier: Programming Language :: Python :: Implementation :: CPython
24
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
25
+ Requires-Python: >=3.8
26
+ Description-Content-Type: text/markdown
27
+ License-File: LICENSE
28
+ License-File: AUTHORS
29
+ Requires-Dist: mpmath (>=0.19)
30
+
31
+ # SymPy
32
+
33
+ [![pypi version](https://img.shields.io/pypi/v/sympy.svg)](https://pypi.python.org/pypi/sympy)
34
+ [![Join the chat at https://gitter.im/sympy/sympy](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sympy/sympy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
35
+ [![Zenodo Badge](https://zenodo.org/badge/18918/sympy/sympy.svg)](https://zenodo.org/badge/latestdoi/18918/sympy/sympy)
36
+ [![Downloads](https://pepy.tech/badge/sympy/month)](https://pepy.tech/project/sympy)
37
+ [![GitHub Issues](https://img.shields.io/badge/issue_tracking-github-blue.svg)](https://github.com/sympy/sympy/issues)
38
+ [![Git Tutorial](https://img.shields.io/badge/PR-Welcome-%23FF8300.svg?)](https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project)
39
+ [![Powered by NumFocus](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
40
+ [![Commits since last release](https://img.shields.io/github/commits-since/sympy/sympy/latest.svg?longCache=true&style=flat-square&logo=git&logoColor=fff)](https://github.com/sympy/sympy/releases)
41
+
42
+ [![SymPy Banner](https://github.com/sympy/sympy/raw/master/banner.svg)](https://sympy.org/)
43
+
44
+
45
+ See the [AUTHORS](AUTHORS) file for the list of authors.
46
+
47
+ And many more people helped on the SymPy mailing list, reported bugs,
48
+ helped organize SymPy's participation in the Google Summer of Code, the
49
+ Google Highly Open Participation Contest, Google Code-In, wrote and
50
+ blogged about SymPy...
51
+
52
+ License: New BSD License (see the [LICENSE](LICENSE) file for details) covers all
53
+ files in the sympy repository unless stated otherwise.
54
+
55
+ Our mailing list is at
56
+ <https://groups.google.com/forum/?fromgroups#!forum/sympy>.
57
+
58
+ We have a community chat at [Gitter](https://gitter.im/sympy/sympy). Feel
59
+ free to ask us anything there. We have a very welcoming and helpful
60
+ community.
61
+
62
+ ## Download
63
+
64
+ The recommended installation method is through Anaconda,
65
+ <https://www.anaconda.com/products/distribution>
66
+
67
+ You can also get the latest version of SymPy from
68
+ <https://pypi.python.org/pypi/sympy/>
69
+
70
+ To get the git version do
71
+
72
+ $ git clone https://github.com/sympy/sympy.git
73
+
74
+ For other options (tarballs, debs, etc.), see
75
+ <https://docs.sympy.org/dev/install.html>.
76
+
77
+ ## Documentation and Usage
78
+
79
+ For in-depth instructions on installation and building the
80
+ documentation, see the [SymPy Documentation Style Guide](https://docs.sympy.org/dev/documentation-style-guide.html).
81
+
82
+ Everything is at:
83
+
84
+ <https://docs.sympy.org/>
85
+
86
+ You can generate everything at the above site in your local copy of
87
+ SymPy by:
88
+
89
+ $ cd doc
90
+ $ make html
91
+
92
+ Then the docs will be in <span class="title-ref">\_build/html</span>. If
93
+ you don't want to read that, here is a short usage:
94
+
95
+ From this directory, start Python and:
96
+
97
+ ``` python
98
+ >>> from sympy import Symbol, cos
99
+ >>> x = Symbol('x')
100
+ >>> e = 1/cos(x)
101
+ >>> print(e.series(x, 0, 10))
102
+ 1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + 277*x**8/8064 + O(x**10)
103
+ ```
104
+
105
+ SymPy also comes with a console that is a simple wrapper around the
106
+ classic python console (or IPython when available) that loads the SymPy
107
+ namespace and executes some common commands for you.
108
+
109
+ To start it, issue:
110
+
111
+ $ bin/isympy
112
+
113
+ from this directory, if SymPy is not installed or simply:
114
+
115
+ $ isympy
116
+
117
+ if SymPy is installed.
118
+
119
+ ## Installation
120
+
121
+ SymPy has a hard dependency on the [mpmath](http://mpmath.org/) library
122
+ (version \>= 0.19). You should install it first, please refer to the
123
+ mpmath installation guide:
124
+
125
+ <https://github.com/fredrik-johansson/mpmath#1-download--installation>
126
+
127
+ To install SymPy using PyPI, run the following command:
128
+
129
+ $ pip install sympy
130
+
131
+ To install SymPy using Anaconda, run the following command:
132
+
133
+ $ conda install -c anaconda sympy
134
+
135
+ To install SymPy from GitHub source, first clone SymPy using `git`:
136
+
137
+ $ git clone https://github.com/sympy/sympy.git
138
+
139
+ Then, in the `sympy` repository that you cloned, simply run:
140
+
141
+ $ pip install .
142
+
143
+ See <https://docs.sympy.org/dev/install.html> for more information.
144
+
145
+ ## Contributing
146
+
147
+ We welcome contributions from anyone, even if you are new to open
148
+ source. Please read our [Introduction to Contributing](https://github.com/sympy/sympy/wiki/Introduction-to-contributing)
149
+ page and the [SymPy Documentation Style Guide](https://docs.sympy.org/dev/documentation-style-guide.html). If you
150
+ are new and looking for some way to contribute, a good place to start is
151
+ to look at the issues tagged [Easy to Fix](https://github.com/sympy/sympy/issues?q=is%3Aopen+is%3Aissue+label%3A%22Easy+to+Fix%22).
152
+
153
+ Please note that all participants in this project are expected to follow
154
+ our Code of Conduct. By participating in this project you agree to abide
155
+ by its terms. See [CODE\_OF\_CONDUCT.md](CODE_OF_CONDUCT.md).
156
+
157
+ ## Tests
158
+
159
+ To execute all tests, run:
160
+
161
+ $./setup.py test
162
+
163
+ in the current directory.
164
+
165
+ For the more fine-grained running of tests or doctests, use `bin/test`
166
+ or respectively `bin/doctest`. The master branch is automatically tested
167
+ by GitHub Actions.
168
+
169
+ To test pull requests, use
170
+ [sympy-bot](https://github.com/sympy/sympy-bot).
171
+
172
+ ## Regenerate Experimental <span class="title-ref">LaTeX</span> Parser/Lexer
173
+
174
+ The parser and lexer were generated with the [ANTLR4](http://antlr4.org)
175
+ toolchain in `sympy/parsing/latex/_antlr` and checked into the repo.
176
+ Presently, most users should not need to regenerate these files, but
177
+ if you plan to work on this feature, you will need the `antlr4`
178
+ command-line tool (and you must ensure that it is in your `PATH`).
179
+ One way to get it is:
180
+
181
+ $ conda install -c conda-forge antlr=4.11.1
182
+
183
+ Alternatively, follow the instructions on the ANTLR website and download
184
+ the `antlr-4.11.1-complete.jar`. Then export the `CLASSPATH` as instructed
185
+ and instead of creating `antlr4` as an alias, make it an executable file
186
+ with the following contents:
187
+ ``` bash
188
+ #!/bin/bash
189
+ java -jar /usr/local/lib/antlr-4.11.1-complete.jar "$@"
190
+ ```
191
+
192
+ After making changes to `sympy/parsing/latex/LaTeX.g4`, run:
193
+
194
+ $ ./setup.py antlr
195
+
196
+ ## Clean
197
+
198
+ To clean everything (thus getting the same tree as in the repository):
199
+
200
+ $ git clean -Xdf
201
+
202
+ which will clear everything ignored by `.gitignore`, and:
203
+
204
+ $ git clean -df
205
+
206
+ to clear all untracked files. You can revert the most recent changes in
207
+ git with:
208
+
209
+ $ git reset --hard
210
+
211
+ WARNING: The above commands will all clear changes you may have made,
212
+ and you will lose them forever. Be sure to check things with `git
213
+ status`, `git diff`, `git clean -Xn`, and `git clean -n` before doing any
214
+ of those.
215
+
216
+ ## Bugs
217
+
218
+ Our issue tracker is at <https://github.com/sympy/sympy/issues>. Please
219
+ report any bugs that you find. Or, even better, fork the repository on
220
+ GitHub and create a pull request. We welcome all changes, big or small,
221
+ and we will help you make the pull request if you are new to git (just
222
+ ask on our mailing list or Gitter Channel). If you further have any queries, you can find answers
223
+ on Stack Overflow using the [sympy](https://stackoverflow.com/questions/tagged/sympy) tag.
224
+
225
+ ## Brief History
226
+
227
+ SymPy was started by Ondřej Čertík in 2005, he wrote some code during
228
+ the summer, then he wrote some more code during summer 2006. In February
229
+ 2007, Fabian Pedregosa joined the project and helped fix many things,
230
+ contributed documentation, and made it alive again. 5 students (Mateusz
231
+ Paprocki, Brian Jorgensen, Jason Gedge, Robert Schwarz, and Chris Wu)
232
+ improved SymPy incredibly during summer 2007 as part of the Google
233
+ Summer of Code. Pearu Peterson joined the development during the summer
234
+ 2007 and he has made SymPy much more competitive by rewriting the core
235
+ from scratch, which has made it from 10x to 100x faster. Jurjen N.E. Bos
236
+ has contributed pretty-printing and other patches. Fredrik Johansson has
237
+ written mpmath and contributed a lot of patches.
238
+
239
+ SymPy has participated in every Google Summer of Code since 2007. You
240
+ can see <https://github.com/sympy/sympy/wiki#google-summer-of-code> for
241
+ full details. Each year has improved SymPy by bounds. Most of SymPy's
242
+ development has come from Google Summer of Code students.
243
+
244
+ In 2011, Ondřej Čertík stepped down as lead developer, with Aaron
245
+ Meurer, who also started as a Google Summer of Code student, taking his
246
+ place. Ondřej Čertík is still active in the community but is too busy
247
+ with work and family to play a lead development role.
248
+
249
+ Since then, a lot more people have joined the development and some
250
+ people have also left. You can see the full list in doc/src/aboutus.rst,
251
+ or online at:
252
+
253
+ <https://docs.sympy.org/dev/aboutus.html#sympy-development-team>
254
+
255
+ The git history goes back to 2007 when development moved from svn to hg.
256
+ To see the history before that point, look at
257
+ <https://github.com/sympy/sympy-old>.
258
+
259
+ You can use git to see the biggest developers. The command:
260
+
261
+ $ git shortlog -ns
262
+
263
+ will show each developer, sorted by commits to the project. The command:
264
+
265
+ $ git shortlog -ns --since="1 year"
266
+
267
+ will show the top developers from the last year.
268
+
269
+ ## Citation
270
+
271
+ To cite SymPy in publications use
272
+
273
+ > Meurer A, Smith CP, Paprocki M, Čertík O, Kirpichev SB, Rocklin M,
274
+ > Kumar A, Ivanov S, Moore JK, Singh S, Rathnayake T, Vig S, Granger BE,
275
+ > Muller RP, Bonazzi F, Gupta H, Vats S, Johansson F, Pedregosa F, Curry
276
+ > MJ, Terrel AR, Roučka Š, Saboo A, Fernando I, Kulal S, Cimrman R,
277
+ > Scopatz A. (2017) SymPy: symbolic computing in Python. *PeerJ Computer
278
+ > Science* 3:e103 <https://doi.org/10.7717/peerj-cs.103>
279
+
280
+ A BibTeX entry for LaTeX users is
281
+
282
+ ``` bibtex
283
+ @article{10.7717/peerj-cs.103,
284
+ title = {SymPy: symbolic computing in Python},
285
+ author = {Meurer, Aaron and Smith, Christopher P. and Paprocki, Mateusz and \v{C}ert\'{i}k, Ond\v{r}ej and Kirpichev, Sergey B. and Rocklin, Matthew and Kumar, Amit and Ivanov, Sergiu and Moore, Jason K. and Singh, Sartaj and Rathnayake, Thilina and Vig, Sean and Granger, Brian E. and Muller, Richard P. and Bonazzi, Francesco and Gupta, Harsh and Vats, Shivam and Johansson, Fredrik and Pedregosa, Fabian and Curry, Matthew J. and Terrel, Andy R. and Rou\v{c}ka, \v{S}t\v{e}p\'{a}n and Saboo, Ashutosh and Fernando, Isuru and Kulal, Sumith and Cimrman, Robert and Scopatz, Anthony},
286
+ year = 2017,
287
+ month = Jan,
288
+ keywords = {Python, Computer algebra system, Symbolics},
289
+ abstract = {
290
+ SymPy is an open-source computer algebra system written in pure Python. It is built with a focus on extensibility and ease of use, through both interactive and programmatic applications. These characteristics have led SymPy to become a popular symbolic library for the scientific Python ecosystem. This paper presents the architecture of SymPy, a description of its features, and a discussion of select submodules. The supplementary material provides additional examples and further outlines details of the architecture and features of SymPy.
291
+ },
292
+ volume = 3,
293
+ pages = {e103},
294
+ journal = {PeerJ Computer Science},
295
+ issn = {2376-5992},
296
+ url = {https://doi.org/10.7717/peerj-cs.103},
297
+ doi = {10.7717/peerj-cs.103}
298
+ }
299
+ ```
300
+
301
+ SymPy is BSD licensed, so you are free to use it whatever you like, be
302
+ it academic, commercial, creating forks or derivatives, as long as you
303
+ copy the BSD statement if you redistribute it (see the LICENSE file for
304
+ details). That said, although not required by the SymPy license, if it
305
+ is convenient for you, please cite SymPy when using it in your work and
306
+ also consider contributing all your changes back, so that we can
307
+ incorporate it and all of us will benefit in the end.
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.40.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ isympy = isympy:main
env-llmeval/lib/python3.10/site-packages/sympy-1.12.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ isympy
2
+ sympy
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.49 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (209 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc ADDED
Binary file (443 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc ADDED
Binary file (2.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc ADDED
Binary file (438 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc ADDED
Binary file (451 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc ADDED
Binary file (471 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc ADDED
Binary file (959 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (824 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc ADDED
Binary file (3.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc ADDED
Binary file (8.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/gui.cpython-310.pyc ADDED
Binary file (4.55 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/keras.cpython-310.pyc ADDED
Binary file (4.93 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/notebook.cpython-310.pyc ADDED
Binary file (7.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/rich.cpython-310.pyc ADDED
Binary file (5.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/std.cpython-310.pyc ADDED
Binary file (45.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/tk.cpython-310.pyc ADDED
Binary file (6.18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/__pycache__/version.cpython-310.pyc ADDED
Binary file (509 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/tqdm/_main.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from warnings import warn
2
+
3
+ from .cli import * # NOQA
4
+ from .cli import __all__ # NOQA
5
+ from .std import TqdmDeprecationWarning
6
+
7
+ warn("This function will be removed in tqdm==5.0.0\n"
8
+ "Please use `tqdm.cli.*` instead of `tqdm._main.*`",
9
+ TqdmDeprecationWarning, stacklevel=2)
env-llmeval/lib/python3.10/site-packages/tqdm/_monitor.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import atexit
2
+ from threading import Event, Thread, current_thread
3
+ from time import time
4
+ from warnings import warn
5
+
6
+ __all__ = ["TMonitor", "TqdmSynchronisationWarning"]
7
+
8
+
9
+ class TqdmSynchronisationWarning(RuntimeWarning):
10
+ """tqdm multi-thread/-process errors which may cause incorrect nesting
11
+ but otherwise no adverse effects"""
12
+ pass
13
+
14
+
15
+ class TMonitor(Thread):
16
+ """
17
+ Monitoring thread for tqdm bars.
18
+ Monitors if tqdm bars are taking too much time to display
19
+ and readjusts miniters automatically if necessary.
20
+
21
+ Parameters
22
+ ----------
23
+ tqdm_cls : class
24
+ tqdm class to use (can be core tqdm or a submodule).
25
+ sleep_interval : float
26
+ Time to sleep between monitoring checks.
27
+ """
28
+ _test = {} # internal vars for unit testing
29
+
30
+ def __init__(self, tqdm_cls, sleep_interval):
31
+ Thread.__init__(self)
32
+ self.daemon = True # kill thread when main killed (KeyboardInterrupt)
33
+ self.woken = 0 # last time woken up, to sync with monitor
34
+ self.tqdm_cls = tqdm_cls
35
+ self.sleep_interval = sleep_interval
36
+ self._time = self._test.get("time", time)
37
+ self.was_killed = self._test.get("Event", Event)()
38
+ atexit.register(self.exit)
39
+ self.start()
40
+
41
+ def exit(self):
42
+ self.was_killed.set()
43
+ if self is not current_thread():
44
+ self.join()
45
+ return self.report()
46
+
47
+ def get_instances(self):
48
+ # returns a copy of started `tqdm_cls` instances
49
+ return [i for i in self.tqdm_cls._instances.copy()
50
+ # Avoid race by checking that the instance started
51
+ if hasattr(i, 'start_t')]
52
+
53
+ def run(self):
54
+ cur_t = self._time()
55
+ while True:
56
+ # After processing and before sleeping, notify that we woke
57
+ # Need to be done just before sleeping
58
+ self.woken = cur_t
59
+ # Sleep some time...
60
+ self.was_killed.wait(self.sleep_interval)
61
+ # Quit if killed
62
+ if self.was_killed.is_set():
63
+ return
64
+ # Then monitor!
65
+ # Acquire lock (to access _instances)
66
+ with self.tqdm_cls.get_lock():
67
+ cur_t = self._time()
68
+ # Check tqdm instances are waiting too long to print
69
+ instances = self.get_instances()
70
+ for instance in instances:
71
+ # Check event in loop to reduce blocking time on exit
72
+ if self.was_killed.is_set():
73
+ return
74
+ # Only if mininterval > 1 (else iterations are just slow)
75
+ # and last refresh exceeded maxinterval
76
+ if (
77
+ instance.miniters > 1
78
+ and (cur_t - instance.last_print_t) >= instance.maxinterval
79
+ ):
80
+ # force bypassing miniters on next iteration
81
+ # (dynamic_miniters adjusts mininterval automatically)
82
+ instance.miniters = 1
83
+ # Refresh now! (works only for manual tqdm)
84
+ instance.refresh(nolock=True)
85
+ # Remove accidental long-lived strong reference
86
+ del instance
87
+ if instances != self.get_instances(): # pragma: nocover
88
+ warn("Set changed size during iteration" +
89
+ " (see https://github.com/tqdm/tqdm/issues/481)",
90
+ TqdmSynchronisationWarning, stacklevel=2)
91
+ # Remove accidental long-lived strong references
92
+ del instances
93
+
94
+ def report(self):
95
+ return not self.was_killed.is_set()
env-llmeval/lib/python3.10/site-packages/tqdm/_tqdm_pandas.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+
3
+ __author__ = "github.com/casperdcl"
4
+ __all__ = ['tqdm_pandas']
5
+
6
+
7
+ def tqdm_pandas(tclass, **tqdm_kwargs):
8
+ """
9
+ Registers the given `tqdm` instance with
10
+ `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
11
+ """
12
+ from tqdm import TqdmDeprecationWarning
13
+
14
+ if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith(
15
+ 'tqdm_')): # delayed adapter case
16
+ TqdmDeprecationWarning(
17
+ "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.",
18
+ fp_write=getattr(tqdm_kwargs.get('file', None), 'write', sys.stderr.write))
19
+ tclass.pandas(**tqdm_kwargs)
20
+ else:
21
+ TqdmDeprecationWarning(
22
+ "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.",
23
+ fp_write=getattr(tclass.fp, 'write', sys.stderr.write))
24
+ type(tclass).pandas(deprecated_t=tclass)