applied-ai-018 committed on
Commit 97f5725 · verified · 1 parent: a47af9b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step40/zero/28.final_rmsnorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/28.final_rmsnorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/7.input_layernorm.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/7.input_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/7.input_layernorm.weight/fp32.pt +3 -0
  7. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/INSTALLER +1 -0
  8. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/METADATA +683 -0
  9. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/RECORD +35 -0
  10. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/WHEEL +6 -0
  11. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/entry_points.txt +2 -0
  12. venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/top_level.txt +1 -0
  13. venv/lib/python3.10/site-packages/frozenlist/__init__.py +95 -0
  14. venv/lib/python3.10/site-packages/frozenlist/__init__.pyi +47 -0
  15. venv/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so +0 -0
  17. venv/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx +123 -0
  18. venv/lib/python3.10/site-packages/frozenlist/py.typed +1 -0
  19. venv/lib/python3.10/site-packages/fsspec/__init__.py +70 -0
  20. venv/lib/python3.10/site-packages/fsspec/_version.py +21 -0
  21. venv/lib/python3.10/site-packages/fsspec/archive.py +73 -0
  22. venv/lib/python3.10/site-packages/fsspec/asyn.py +1096 -0
  23. venv/lib/python3.10/site-packages/fsspec/caching.py +881 -0
  24. venv/lib/python3.10/site-packages/fsspec/callbacks.py +324 -0
  25. venv/lib/python3.10/site-packages/fsspec/compression.py +174 -0
  26. venv/lib/python3.10/site-packages/fsspec/config.py +131 -0
  27. venv/lib/python3.10/site-packages/fsspec/conftest.py +55 -0
  28. venv/lib/python3.10/site-packages/fsspec/core.py +714 -0
  29. venv/lib/python3.10/site-packages/fsspec/dircache.py +98 -0
  30. venv/lib/python3.10/site-packages/fsspec/exceptions.py +17 -0
  31. venv/lib/python3.10/site-packages/fsspec/fuse.py +324 -0
  32. venv/lib/python3.10/site-packages/fsspec/generic.py +408 -0
  33. venv/lib/python3.10/site-packages/fsspec/gui.py +414 -0
  34. venv/lib/python3.10/site-packages/fsspec/implementations/__init__.py +0 -0
  35. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/fsspec/implementations/arrow.py +306 -0
  45. venv/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py +76 -0
  46. venv/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py +232 -0
  47. venv/lib/python3.10/site-packages/fsspec/implementations/cached.py +939 -0
  48. venv/lib/python3.10/site-packages/fsspec/implementations/dask.py +152 -0
  49. venv/lib/python3.10/site-packages/fsspec/implementations/data.py +58 -0
  50. venv/lib/python3.10/site-packages/fsspec/implementations/dbfs.py +467 -0
ckpts/universal/global_step40/zero/20.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a09f326e10ffcf3f8747344aa52a2b589eb9f8f08a6d2277319f9d72194ffe99
+ size 9293
ckpts/universal/global_step40/zero/28.final_rmsnorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da44b3887e35582fb7f7f87f187291ddbf0ec8b74db57e83655a64c45fe17ffc
+ size 9387
ckpts/universal/global_step40/zero/28.final_rmsnorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1deb380704f89927a69f5da04574ab928676cf012b713720434653b8ad041bd9
+ size 9293
ckpts/universal/global_step40/zero/7.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bbf657b2dd94ccdc919cf465b76b756672fb29e76d09897ae0caae31ab386a6
+ size 9372
ckpts/universal/global_step40/zero/7.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0ea776d7fbb79807f67fcd709cc5919e0dca9baef6ae3b4758f81d441c68feb
+ size 9387
ckpts/universal/global_step40/zero/7.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77ff2a1f5f96465b4d2d2f8d226ffb427e2d308f97f9a7f3ff2903fb04b9d7d4
+ size 9293
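The six checkpoint entries above are Git LFS pointer files rather than the tensors themselves: each records only the LFS spec version, a sha256 object id, and the blob size. As a hedged illustration (the helper below is hypothetical and not part of this commit), such a pointer can be split into its three fields like this:

```python
# Hypothetical helper: parse a Git LFS pointer file (format shown in the diffs above).
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # "oid sha256:..." -> ("oid", "sha256:...")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:77ff2a1f5f96465b4d2d2f8d226ffb427e2d308f97f9a7f3ff2903fb04b9d7d4
size 9293"""

info = parse_lfs_pointer(pointer)
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:77ff...', 'size': '9293'}
```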
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/METADATA ADDED
@@ -0,0 +1,683 @@
1
+ Metadata-Version: 2.1
2
+ Name: charset-normalizer
3
+ Version: 3.3.2
4
+ Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
5
+ Home-page: https://github.com/Ousret/charset_normalizer
6
+ Author: Ahmed TAHRI
7
+ Author-email: [email protected]
8
+ License: MIT
9
+ Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
10
+ Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
11
+ Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3.12
25
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
26
+ Classifier: Topic :: Text Processing :: Linguistic
27
+ Classifier: Topic :: Utilities
28
+ Classifier: Typing :: Typed
29
+ Requires-Python: >=3.7.0
30
+ Description-Content-Type: text/markdown
31
+ License-File: LICENSE
32
+ Provides-Extra: unicode_backport
33
+
34
+ <h1 align="center">Charset Detection, for Everyone 👋</h1>
35
+
36
+ <p align="center">
37
+ <sup>The Real First Universal Charset Detector</sup><br>
38
+ <a href="https://pypi.org/project/charset-normalizer">
39
+ <img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
40
+ </a>
41
+ <a href="https://pepy.tech/project/charset-normalizer/">
42
+ <img alt="Download Count Total" src="https://static.pepy.tech/badge/charset-normalizer/month" />
43
+ </a>
44
+ <a href="https://bestpractices.coreinfrastructure.org/projects/7297">
45
+ <img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
46
+ </a>
47
+ </p>
48
+ <p align="center">
49
+ <sup><i>Featured Packages</i></sup><br>
50
+ <a href="https://github.com/jawah/niquests">
51
+ <img alt="Static Badge" src="https://img.shields.io/badge/Niquests-HTTP_1.1%2C%202%2C_and_3_Client-cyan">
52
+ </a>
53
+ <a href="https://github.com/jawah/wassima">
54
+ <img alt="Static Badge" src="https://img.shields.io/badge/Wassima-Certifi_Killer-cyan">
55
+ </a>
56
+ </p>
57
+ <p align="center">
58
+ <sup><i>In other language (unofficial port - by the community)</i></sup><br>
59
+ <a href="https://github.com/nickspring/charset-normalizer-rs">
60
+ <img alt="Static Badge" src="https://img.shields.io/badge/Rust-red">
61
+ </a>
62
+ </p>
63
+
64
+ > A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
65
+ > I'm trying to resolve the issue by taking a new approach.
66
+ > All IANA character set names for which the Python core library provides codecs are supported.
67
+
68
+ <p align="center">
69
+ >>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
70
+ </p>
71
+
72
+ This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
73
+
74
+ | Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
75
+ |--------------------------------------------------|:---------------------------------------------:|:--------------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
76
+ | `Fast` | ❌ | ✅ | ✅ |
77
+ | `Universal**` | ❌ | ✅ | ❌ |
78
+ | `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
79
+ | `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
80
+ | `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
81
+ | `Native Python` | ✅ | ✅ | ❌ |
82
+ | `Detect spoken language` | ❌ | ✅ | N/A |
83
+ | `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
84
+ | `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
85
+ | `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |
86
+
87
+ <p align="center">
88
+ <img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
89
+ </p>
90
+
91
+ *\*\* : They are clearly using specific code for a specific encoding even if covering most of used one*<br>
92
+ Did you got there because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)
93
+
94
+ ## ⚡ Performance
95
+
96
+ This package offer better performance than its counterpart Chardet. Here are some numbers.
97
+
98
+ | Package | Accuracy | Mean per file (ms) | File per sec (est) |
99
+ |-----------------------------------------------|:--------:|:------------------:|:------------------:|
100
+ | [chardet](https://github.com/chardet/chardet) | 86 % | 200 ms | 5 file/sec |
101
+ | charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
102
+
103
+ | Package | 99th percentile | 95th percentile | 50th percentile |
104
+ |-----------------------------------------------|:---------------:|:---------------:|:---------------:|
105
+ | [chardet](https://github.com/chardet/chardet) | 1200 ms | 287 ms | 23 ms |
106
+ | charset-normalizer | 100 ms | 50 ms | 5 ms |
107
+
108
+ Chardet's performance on larger file (1MB+) are very poor. Expect huge difference on large payload.
109
+
110
+ > Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
111
+ > And yes, these results might change at any time. The dataset can be updated to include more files.
112
+ > The actual delays heavily depends on your CPU capabilities. The factors should remain the same.
113
+ > Keep in mind that the stats are generous and that Chardet accuracy vs our is measured using Chardet initial capability
114
+ > (eg. Supported Encoding) Challenge-them if you want.
115
+
116
+ ## ✨ Installation
117
+
118
+ Using pip:
119
+
120
+ ```sh
121
+ pip install charset-normalizer -U
122
+ ```
123
+
124
+ ## 🚀 Basic Usage
125
+
126
+ ### CLI
127
+ This package comes with a CLI.
128
+
129
+ ```
130
+ usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
131
+ file [file ...]
132
+
133
+ The Real First Universal Charset Detector. Discover originating encoding used
134
+ on text file. Normalize text to unicode.
135
+
136
+ positional arguments:
137
+ files File(s) to be analysed
138
+
139
+ optional arguments:
140
+ -h, --help show this help message and exit
141
+ -v, --verbose Display complementary information about file if any.
142
+ Stdout will contain logs about the detection process.
143
+ -a, --with-alternative
144
+ Output complementary possibilities if any. Top-level
145
+ JSON WILL be a list.
146
+ -n, --normalize Permit to normalize input file. If not set, program
147
+ does not write anything.
148
+ -m, --minimal Only output the charset detected to STDOUT. Disabling
149
+ JSON output.
150
+ -r, --replace Replace file when trying to normalize it instead of
151
+ creating a new one.
152
+ -f, --force Replace file without asking if you are sure, use this
153
+ flag with caution.
154
+ -t THRESHOLD, --threshold THRESHOLD
155
+ Define a custom maximum amount of chaos allowed in
156
+ decoded content. 0. <= chaos <= 1.
157
+ --version Show version information and exit.
158
+ ```
159
+
160
+ ```bash
161
+ normalizer ./data/sample.1.fr.srt
162
+ ```
163
+
164
+ or
165
+
166
+ ```bash
167
+ python -m charset_normalizer ./data/sample.1.fr.srt
168
+ ```
169
+
170
+ 🎉 Since version 1.4.0 the CLI produce easily usable stdout result in JSON format.
171
+
172
+ ```json
173
+ {
174
+ "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
175
+ "encoding": "cp1252",
176
+ "encoding_aliases": [
177
+ "1252",
178
+ "windows_1252"
179
+ ],
180
+ "alternative_encodings": [
181
+ "cp1254",
182
+ "cp1256",
183
+ "cp1258",
184
+ "iso8859_14",
185
+ "iso8859_15",
186
+ "iso8859_16",
187
+ "iso8859_3",
188
+ "iso8859_9",
189
+ "latin_1",
190
+ "mbcs"
191
+ ],
192
+ "language": "French",
193
+ "alphabets": [
194
+ "Basic Latin",
195
+ "Latin-1 Supplement"
196
+ ],
197
+ "has_sig_or_bom": false,
198
+ "chaos": 0.149,
199
+ "coherence": 97.152,
200
+ "unicode_path": null,
201
+ "is_preferred": true
202
+ }
203
+ ```
204
+
205
+ ### Python
206
+ *Just print out normalized text*
207
+ ```python
208
+ from charset_normalizer import from_path
209
+
210
+ results = from_path('./my_subtitle.srt')
211
+
212
+ print(str(results.best()))
213
+ ```
214
+
215
+ *Upgrade your code without effort*
216
+ ```python
217
+ from charset_normalizer import detect
218
+ ```
219
+
220
+ The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
221
+
222
+ See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
223
+
224
+ ## 😇 Why
225
+
226
+ When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
227
+ reliable alternative using a completely different method. Also! I never back down on a good challenge!
228
+
229
+ I **don't care** about the **originating charset** encoding, because **two different tables** can
230
+ produce **two identical rendered string.**
231
+ What I want is to get readable text, the best I can.
232
+
233
+ In a way, **I'm brute forcing text decoding.** How cool is that ? 😎
234
+
235
+ Don't confuse package **ftfy** with charset-normalizer or chardet. ftfy goal is to repair unicode string whereas charset-normalizer to convert raw file in unknown encoding to unicode.
236
+
237
+ ## 🍰 How
238
+
239
+ - Discard all charset encoding table that could not fit the binary content.
240
+ - Measure noise, or the mess once opened (by chunks) with a corresponding charset encoding.
241
+ - Extract matches with the lowest mess detected.
242
+ - Additionally, we measure coherence / probe for a language.
243
+
244
+ **Wait a minute**, what is noise/mess and coherence according to **YOU ?**
245
+
246
+ *Noise :* I opened hundred of text files, **written by humans**, with the wrong encoding table. **I observed**, then
247
+ **I established** some ground rules about **what is obvious** when **it seems like** a mess.
248
+ I know that my interpretation of what is noise is probably incomplete, feel free to contribute in order to
249
+ improve or rewrite it.
250
+
251
+ *Coherence :* For each language there is on earth, we have computed ranked letter appearance occurrences (the best we can). So I thought
252
+ that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
253
+
254
+ ## ⚡ Known limitations
255
+
256
+ - Language detection is unreliable when text contains two or more languages sharing identical letters. (eg. HTML (english tags) + Turkish content (Sharing Latin characters))
257
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother run detection on very tiny content.
258
+
259
+ ## ⚠️ About Python EOLs
260
+
261
+ **If you are running:**
262
+
263
+ - Python >=2.7,<3.5: Unsupported
264
+ - Python 3.5: charset-normalizer < 2.1
265
+ - Python 3.6: charset-normalizer < 3.1
266
+ - Python 3.7: charset-normalizer < 4.0
267
+
268
+ Upgrade your Python interpreter as soon as possible.
269
+
270
+ ## 👤 Contributing
271
+
272
+ Contributions, issues and feature requests are very much welcome.<br />
273
+ Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
274
+
275
+ ## 📝 License
276
+
277
+ Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
278
+ This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
279
+
280
+ Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
281
+
282
+ ## 💼 For Enterprise
283
+
284
+ Professional support for charset-normalizer is available as part of the [Tidelift
285
+ Subscription][1]. Tidelift gives software development teams a single source for
286
+ purchasing and maintaining their software, with professional grade assurances
287
+ from the experts who know it best, while seamlessly integrating with existing
288
+ tools.
289
+
290
+ [1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme
291
+
292
+ # Changelog
293
+ All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
294
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
295
+
296
+ ## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
297
+
298
+ ### Fixed
299
+ - Unintentional memory usage regression when using large payload that match several encoding (#376)
300
+ - Regression on some detection case showcased in the documentation (#371)
301
+
302
+ ### Added
303
+ - Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife)
304
+
305
+ ## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
306
+
307
+ ### Changed
308
+ - Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
309
+ - Improved the general detection reliability based on reports from the community
310
+
311
+ ## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
312
+
313
+ ### Added
314
+ - Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
315
+ - Support for 9 forgotten encoding that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323)
316
+
317
+ ### Removed
318
+ - (internal) Redundant utils.is_ascii function and unused function is_private_use_only
319
+ - (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
320
+
321
+ ### Changed
322
+ - (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
323
+ - Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
324
+
325
+ ### Fixed
326
+ - Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350)
327
+
328
+ ## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
329
+
330
+ ### Changed
331
+ - Typehint for function `from_path` no longer enforce `PathLike` as its first argument
332
+ - Minor improvement over the global detection reliability
333
+
334
+ ### Added
335
+ - Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries
336
+ - Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True)
337
+ - Explicit support for Python 3.12
338
+
339
+ ### Fixed
340
+ - Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289)
341
+
342
+ ## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
343
+
344
+ ### Added
345
+ - Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262)
346
+
347
+ ### Removed
348
+ - Support for Python 3.6 (PR #260)
349
+
350
+ ### Changed
351
+ - Optional speedup provided by mypy/c 1.0.1
352
+
353
+ ## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
354
+
355
+ ### Fixed
356
+ - Multi-bytes cutter/chunk generator did not always cut correctly (PR #233)
357
+
358
+ ### Changed
359
+ - Speedup provided by mypy/c 0.990 on Python >= 3.7
360
+
361
+ ## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
362
+
363
+ ### Added
364
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
365
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
366
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
367
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
368
+
369
+ ### Changed
370
+ - Build with static metadata using 'build' frontend
371
+ - Make the language detection stricter
372
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
373
+
374
+ ### Fixed
375
+ - CLI with opt --normalize fail when using full path for files
376
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
377
+ - Sphinx warnings when generating the documentation
378
+
379
+ ### Removed
380
+ - Coherence detector no longer return 'Simple English' instead return 'English'
381
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
382
+ - Breaking: Method `first()` and `best()` from CharsetMatch
383
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
384
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
385
+ - Breaking: Top-level function `normalize`
386
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
387
+ - Support for the backport `unicodedata2`
388
+
389
+ ## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
390
+
391
+ ### Added
392
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
393
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
394
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
395
+
396
+ ### Changed
397
+ - Build with static metadata using 'build' frontend
398
+ - Make the language detection stricter
399
+
400
+ ### Fixed
401
+ - CLI with opt --normalize fail when using full path for files
402
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
403
+
404
+ ### Removed
405
+ - Coherence detector no longer return 'Simple English' instead return 'English'
406
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
407
+
408
+ ## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
409
+
410
+ ### Added
411
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
412
+
413
+ ### Removed
414
+ - Breaking: Method `first()` and `best()` from CharsetMatch
415
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
416
+
417
+ ### Fixed
418
+ - Sphinx warnings when generating the documentation
419
+
420
+ ## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
421
+
422
+ ### Changed
423
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
424
+
425
+ ### Removed
426
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
427
+ - Breaking: Top-level function `normalize`
428
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
429
+ - Support for the backport `unicodedata2`
430
+
431
+ ## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
432
+
433
+ ### Deprecated
434
+ - Function `normalize` scheduled for removal in 3.0
435
+
436
+ ### Changed
437
+ - Removed useless call to decode in fn is_unprintable (#206)
438
+
439
+ ### Fixed
440
+ - Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
441
+
442
+ ## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
443
+
444
+ ### Added
445
+ - Output the Unicode table version when running the CLI with `--version` (PR #194)
446
+
447
+ ### Changed
448
+ - Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175)
449
+ - Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183)
450
+
451
+ ### Fixed
452
+ - Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
453
+ - CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
454
+
455
+ ### Removed
456
+ - Support for Python 3.5 (PR #192)
457
+
458
+ ### Deprecated
459
+ - Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194)
460
+
461
+ ## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
462
+
463
+ ### Fixed
464
+ - ASCII miss-detection on rare cases (PR #170)
465
+
466
+ ## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
467
+
468
+ ### Added
469
+ - Explicit support for Python 3.11 (PR #164)
470
+
471
+ ### Changed
472
+ - The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
473
+
474
+ ## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
475
+
476
+ ### Fixed
477
+ - Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
478
+
479
+ ### Changed
480
+ - Skipping the language-detection (CD) on ASCII (PR #155)
481
+
482
+ ## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
483
+
484
+ ### Changed
485
+ - Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
486
+
487
+ ### Fixed
488
+ - Wrong logging level applied when setting kwarg `explain` to True (PR #146)
489
+
490
+ ## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
491
+ ### Changed
492
+ - Improvement over Vietnamese detection (PR #126)
493
+ - MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
494
+ - Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
495
+ - call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
496
+ - Code style as refactored by Sourcery-AI (PR #131)
497
+ - Minor adjustment on the MD around european words (PR #133)
498
+ - Remove and replace SRTs from assets / tests (PR #139)
499
+ - Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
500
+ - Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
501
+
502
+ ### Fixed
503
+ - Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
504
+ - Avoid using too insignificant chunk (PR #137)
505
+
506
+ ### Added
507
+ - Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
508
+ - Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
509
+
510
+ ## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
511
+ ### Added
512
+ - Add support for Kazakh (Cyrillic) language detection (PR #109)
513
+
514
+ ### Changed
515
+ - Further, improve inferring the language from a given single-byte code page (PR #112)
516
+ - Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
517
+ - Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
518
+ - Various detection improvement (MD+CD) (PR #117)
519
+
520
+ ### Removed
521
+ - Remove redundant logging entry about detected language(s) (PR #115)
522
+
523
+ ### Fixed
524
+ - Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
525
+
526
+ ## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
527
+ ### Fixed
528
+ - Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100)
529
+ - Fix CLI crash when using --minimal output in certain cases (PR #103)
530
+
531
+ ### Changed
532
+ - Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
533
+
534
+ ## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
535
+ ### Changed
536
+ - The project now comply with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
537
+ - The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
538
+ - The Unicode detection is slightly improved (PR #93)
539
+ - Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
540
+
541
+ ### Removed
542
+ - The project no longer raise warning on tiny content given for detection, will be simply logged as warning instead (PR #92)
543
+
544
+ ### Fixed
545
+ - In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
546
+ - Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
547
+ - The MANIFEST.in was not exhaustive (PR #78)
548
+
549
+ ## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
550
+ ### Fixed
551
+ - The CLI no longer raise an unexpected exception when no encoding has been found (PR #70)
552
+ - Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
553
+ - The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
554
+ - Submatch factoring could be wrong in rare edge cases (PR #72)
555
+ - Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
556
+ - Fix line endings from CRLF to LF for certain project files (PR #67)
557
+
558
+ ### Changed
559
+ - Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
560
+ - Allow fallback on specified encoding if any (PR #71)
561
+
562
+ ## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
563
+ ### Changed
564
+ - Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
565
+ - According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
566
+
567
+ ## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
568
+ ### Fixed
569
+ - Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
570
+
571
+ ### Changed
572
+ - Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
573
+
574
+ ## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
575
+ ### Fixed
576
+ - Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
577
+ - Using explain=False permanently disable the verbose output in the current runtime (PR #47)
578
+ - One log entry (language target preemptive) was not show in logs when using explain=True (PR #47)
579
+ - Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
580
+
581
+ ### Changed
582
+ - Public function normalize default args values were not aligned with from_bytes (PR #53)
583
+
584
+ ### Added
585
+ - You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
586
+
587
+ ## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
588
+ ### Changed
589
+ - 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
590
+ - Accent has been made on UTF-8 detection, should perform rather instantaneous.
591
+ - The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
592
+ - The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
593
+ - The program has been rewritten to ease the readability and maintainability. (+Using static typing)+
594
+ - utf_7 detection has been reinstated.
595
+
596
+ ### Removed
597
+ - This package no longer require anything when used with Python 3.5 (Dropped cached_property)
598
+ - Removed support for these languages: Catalan, Esperanto, Kazakh, Baque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian.
599
+ - The exception hook on UnicodeDecodeError has been removed.
600
+
601
+ ### Deprecated
602
+ - Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
603
+
604
+ ### Fixed
605
+ - The CLI output used the relative path of the file(s). Should be absolute.
606
+
607
+ ## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
608
+ ### Fixed
609
+ - Logger configuration/usage no longer conflict with others (PR #44)
610
+
611
+ ## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
612
+ ### Removed
613
+ - Using standard logging instead of using the package loguru.
614
+ - Dropping nose test framework in favor of the maintained pytest.
615
+ - Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
616
+ - Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
617
+ - Stop support for UTF-7 that does not contain a SIG.
618
+ - Dropping PrettyTable, replaced with pure JSON output in CLI.
619
+
620
+ ### Fixed
621
+ - BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process.
622
+ - Not searching properly for the BOM when trying utf32/16 parent codec.
623
+
624
+ ### Changed
625
+ - Improving the package final size by compressing frequencies.json.
626
+ - Huge improvement over the larges payload.
627
+
628
+ ### Added
629
+ - CLI now produces JSON consumable output.
630
+ - Return ASCII if given sequences fit. Given reasonable confidence.
631
+
632
+ ## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
633
+
634
+ ### Fixed
635
+ - In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
636
+
637
+ ## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
638
+
639
+ ### Fixed
640
+ - Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
641
+
642
+ ## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
643
+
644
+ ### Fixed
645
+ - The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
646
+
647
+ ## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
648
+
649
+ ### Changed
650
+ - Amend the previous release to allow prettytable 2.0 (PR #35)
651
+
652
+ ## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
653
+
654
+ ### Fixed
655
+ - Fix error while using the package with a python pre-release interpreter (PR #33)
656
+
657
+ ### Changed
658
+ - Dependencies refactoring, constraints revised.
659
+
660
+ ### Added
661
+ - Add python 3.9 and 3.10 to the supported interpreters
662
+
663
+ MIT License
664
+
665
+ Copyright (c) 2019 TAHRI Ahmed R.
666
+
667
+ Permission is hereby granted, free of charge, to any person obtaining a copy
668
+ of this software and associated documentation files (the "Software"), to deal
669
+ in the Software without restriction, including without limitation the rights
670
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
671
+ copies of the Software, and to permit persons to whom the Software is
672
+ furnished to do so, subject to the following conditions:
673
+
674
+ The above copyright notice and this permission notice shall be included in all
675
+ copies or substantial portions of the Software.
676
+
677
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
678
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
679
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
680
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
681
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
682
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
683
+ SOFTWARE.
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/RECORD ADDED
@@ -0,0 +1,35 @@
1
+ ../../../bin/normalizer,sha256=REM3LcJfqufYO8naoFIZ6mi9GSXTqUkJzrQL1NUoijo,262
2
+ charset_normalizer-3.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ charset_normalizer-3.3.2.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070
4
+ charset_normalizer-3.3.2.dist-info/METADATA,sha256=cfLhl5A6SI-F0oclm8w8ux9wshL1nipdeCdVnYb4AaA,33550
5
+ charset_normalizer-3.3.2.dist-info/RECORD,,
6
+ charset_normalizer-3.3.2.dist-info/WHEEL,sha256=cD39NF6a3hkhaWoPQJng7gnGZRIfQsUCtwcedITCPtg,152
7
+ charset_normalizer-3.3.2.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65
8
+ charset_normalizer-3.3.2.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
9
+ charset_normalizer/__init__.py,sha256=UzI3xC8PhmcLRMzSgPb6minTmRq0kWznnCBJ8ZCc2XI,1577
10
+ charset_normalizer/__main__.py,sha256=JxY8bleaENOFlLRb9HfoeZCzAMnn2A1oGR5Xm2eyqg0,73
11
+ charset_normalizer/__pycache__/__init__.cpython-310.pyc,,
12
+ charset_normalizer/__pycache__/__main__.cpython-310.pyc,,
13
+ charset_normalizer/__pycache__/api.cpython-310.pyc,,
14
+ charset_normalizer/__pycache__/cd.cpython-310.pyc,,
15
+ charset_normalizer/__pycache__/constant.cpython-310.pyc,,
16
+ charset_normalizer/__pycache__/legacy.cpython-310.pyc,,
17
+ charset_normalizer/__pycache__/md.cpython-310.pyc,,
18
+ charset_normalizer/__pycache__/models.cpython-310.pyc,,
19
+ charset_normalizer/__pycache__/utils.cpython-310.pyc,,
20
+ charset_normalizer/__pycache__/version.cpython-310.pyc,,
21
+ charset_normalizer/api.py,sha256=WOlWjy6wT8SeMYFpaGbXZFN1TMXa-s8vZYfkL4G29iQ,21097
22
+ charset_normalizer/cd.py,sha256=xwZliZcTQFA3jU0c00PRiu9MNxXTFxQkFLWmMW24ZzI,12560
23
+ charset_normalizer/cli/__init__.py,sha256=D5ERp8P62llm2FuoMzydZ7d9rs8cvvLXqE-1_6oViPc,100
24
+ charset_normalizer/cli/__main__.py,sha256=2F-xURZJzo063Ye-2RLJ2wcmURpbKeAzKwpiws65dAs,9744
25
+ charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc,,
26
+ charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc,,
27
+ charset_normalizer/constant.py,sha256=p0IsOVcEbPWYPOdWhnhRbjK1YVBy6fs05C5vKC-zoxU,40481
28
+ charset_normalizer/legacy.py,sha256=T-QuVMsMeDiQEk8WSszMrzVJg_14AMeSkmHdRYhdl1k,2071
29
+ charset_normalizer/md.cpython-310-x86_64-linux-gnu.so,sha256=Y7QSLD5QLoSFAWys0-tL7R6QB7oi5864zM6zr7RWek4,16064
30
+ charset_normalizer/md.py,sha256=NkSuVLK13_a8c7BxZ4cGIQ5vOtGIWOdh22WZEvjp-7U,19624
31
+ charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so,sha256=y2N-LgwRp7TCdgRqsmIM8UvKeavC0t8kx_hdRvaSfcY,268472
32
+ charset_normalizer/models.py,sha256=I5i0s4aKCCgLPY2tUY3pwkgFA-BUbbNxQ7hVkVTt62s,11624
33
+ charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
+ charset_normalizer/utils.py,sha256=teiosMqzKjXyAHXnGdjSBOgnBZwx-SkBbCLrx0UXy8M,11894
35
+ charset_normalizer/version.py,sha256=iHKUfHD3kDRSyrh_BN2ojh43TA5-UZQjvbVIEFfpHDs,79
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.41.2)
+ Root-Is-Purelib: false
+ Tag: cp310-cp310-manylinux_2_17_x86_64
+ Tag: cp310-cp310-manylinux2014_x86_64
+
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ normalizer = charset_normalizer.cli:cli_detect
venv/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ charset_normalizer
venv/lib/python3.10/site-packages/frozenlist/__init__.py ADDED
@@ -0,0 +1,95 @@
1
+ import os
2
+ import sys
3
+ import types
4
+ from collections.abc import MutableSequence
5
+ from functools import total_ordering
6
+ from typing import Type
7
+
8
+ __version__ = "1.4.1"
9
+
10
+ __all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...]
11
+
12
+
13
+ NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool
14
+
15
+
16
+ @total_ordering
17
+ class FrozenList(MutableSequence):
18
+ __slots__ = ("_frozen", "_items")
19
+
20
+ if sys.version_info >= (3, 9):
21
+ __class_getitem__ = classmethod(types.GenericAlias)
22
+ else:
23
+
24
+ @classmethod
25
+ def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]:
26
+ return cls
27
+
28
+ def __init__(self, items=None):
29
+ self._frozen = False
30
+ if items is not None:
31
+ items = list(items)
32
+ else:
33
+ items = []
34
+ self._items = items
35
+
36
+ @property
37
+ def frozen(self):
38
+ return self._frozen
39
+
40
+ def freeze(self):
41
+ self._frozen = True
42
+
43
+ def __getitem__(self, index):
44
+ return self._items[index]
45
+
46
+ def __setitem__(self, index, value):
47
+ if self._frozen:
48
+ raise RuntimeError("Cannot modify frozen list.")
49
+ self._items[index] = value
50
+
51
+ def __delitem__(self, index):
52
+ if self._frozen:
53
+ raise RuntimeError("Cannot modify frozen list.")
54
+ del self._items[index]
55
+
56
+ def __len__(self):
57
+ return self._items.__len__()
58
+
59
+ def __iter__(self):
60
+ return self._items.__iter__()
61
+
62
+ def __reversed__(self):
63
+ return self._items.__reversed__()
64
+
65
+ def __eq__(self, other):
66
+ return list(self) == other
67
+
68
+ def __le__(self, other):
69
+ return list(self) <= other
70
+
71
+ def insert(self, pos, item):
72
+ if self._frozen:
73
+ raise RuntimeError("Cannot modify frozen list.")
74
+ self._items.insert(pos, item)
75
+
76
+ def __repr__(self):
77
+ return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
78
+
79
+ def __hash__(self):
80
+ if self._frozen:
81
+ return hash(tuple(self))
82
+ else:
83
+ raise RuntimeError("Cannot hash unfrozen list.")
84
+
85
+
86
+ PyFrozenList = FrozenList
87
+
88
+
89
+ if not NO_EXTENSIONS:
90
+ try:
91
+ from ._frozenlist import FrozenList as CFrozenList # type: ignore
92
+ except ImportError: # pragma: no cover
93
+ pass
94
+ else:
95
+ FrozenList = CFrozenList # type: ignore
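For context on the FrozenList implementation added above: it behaves like an ordinary mutable sequence until freeze() is called, after which any mutation raises RuntimeError and the list becomes hashable. A minimal usage sketch, assuming the package is importable from this environment:

```python
from frozenlist import FrozenList

fl = FrozenList([1, 2, 3])
fl.append(4)          # mutation is allowed while unfrozen
assert not fl.frozen

fl.freeze()
assert fl.frozen
assert hash(fl) == hash((1, 2, 3, 4))  # hashable once frozen

try:
    fl.append(5)      # any mutation now fails
except RuntimeError as exc:
    print(exc)        # "Cannot modify frozen list."
```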
venv/lib/python3.10/site-packages/frozenlist/__init__.pyi ADDED
@@ -0,0 +1,47 @@
1
+ from typing import (
2
+ Generic,
3
+ Iterable,
4
+ Iterator,
5
+ List,
6
+ MutableSequence,
7
+ Optional,
8
+ TypeVar,
9
+ Union,
10
+ overload,
11
+ )
12
+
13
+ _T = TypeVar("_T")
14
+ _Arg = Union[List[_T], Iterable[_T]]
15
+
16
+ class FrozenList(MutableSequence[_T], Generic[_T]):
17
+ def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ...
18
+ @property
19
+ def frozen(self) -> bool: ...
20
+ def freeze(self) -> None: ...
21
+ @overload
22
+ def __getitem__(self, i: int) -> _T: ...
23
+ @overload
24
+ def __getitem__(self, s: slice) -> FrozenList[_T]: ...
25
+ @overload
26
+ def __setitem__(self, i: int, o: _T) -> None: ...
27
+ @overload
28
+ def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
29
+ @overload
30
+ def __delitem__(self, i: int) -> None: ...
31
+ @overload
32
+ def __delitem__(self, i: slice) -> None: ...
33
+ def __len__(self) -> int: ...
34
+ def __iter__(self) -> Iterator[_T]: ...
35
+ def __reversed__(self) -> Iterator[_T]: ...
36
+ def __eq__(self, other: object) -> bool: ...
37
+ def __le__(self, other: FrozenList[_T]) -> bool: ...
38
+ def __ne__(self, other: object) -> bool: ...
39
+ def __lt__(self, other: FrozenList[_T]) -> bool: ...
40
+ def __ge__(self, other: FrozenList[_T]) -> bool: ...
41
+ def __gt__(self, other: FrozenList[_T]) -> bool: ...
42
+ def insert(self, pos: int, item: _T) -> None: ...
43
+ def __repr__(self) -> str: ...
44
+ def __hash__(self) -> int: ...
45
+
46
+ # types for C accelerators are the same
47
+ CFrozenList = PyFrozenList = FrozenList
venv/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.25 kB).
 
venv/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (766 kB).
 
venv/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx ADDED
@@ -0,0 +1,123 @@
1
+ import sys
2
+ import types
3
+ from collections.abc import MutableSequence
4
+
5
+
6
+ cdef class FrozenList:
7
+
8
+ if sys.version_info >= (3, 9):
9
+ __class_getitem__ = classmethod(types.GenericAlias)
10
+ else:
11
+ @classmethod
12
+ def __class_getitem__(cls):
13
+ return cls
14
+
15
+ cdef readonly bint frozen
16
+ cdef list _items
17
+
18
+ def __init__(self, items=None):
19
+ self.frozen = False
20
+ if items is not None:
21
+ items = list(items)
22
+ else:
23
+ items = []
24
+ self._items = items
25
+
26
+ cdef object _check_frozen(self):
27
+ if self.frozen:
28
+ raise RuntimeError("Cannot modify frozen list.")
29
+
30
+ cdef inline object _fast_len(self):
31
+ return len(self._items)
32
+
33
+ def freeze(self):
34
+ self.frozen = True
35
+
36
+ def __getitem__(self, index):
37
+ return self._items[index]
38
+
39
+ def __setitem__(self, index, value):
40
+ self._check_frozen()
41
+ self._items[index] = value
42
+
43
+ def __delitem__(self, index):
44
+ self._check_frozen()
45
+ del self._items[index]
46
+
47
+ def __len__(self):
48
+ return self._fast_len()
49
+
50
+ def __iter__(self):
51
+ return self._items.__iter__()
52
+
53
+ def __reversed__(self):
54
+ return self._items.__reversed__()
55
+
56
+ def __richcmp__(self, other, op):
57
+ if op == 0: # <
58
+ return list(self) < other
59
+ if op == 1: # <=
60
+ return list(self) <= other
61
+ if op == 2: # ==
62
+ return list(self) == other
63
+ if op == 3: # !=
64
+ return list(self) != other
65
+ if op == 4: # >
66
+ return list(self) > other
67
+ if op == 5: # =>
68
+ return list(self) >= other
69
+
70
+ def insert(self, pos, item):
71
+ self._check_frozen()
72
+ self._items.insert(pos, item)
73
+
74
+ def __contains__(self, item):
75
+ return item in self._items
76
+
77
+ def __iadd__(self, items):
78
+ self._check_frozen()
79
+ self._items += list(items)
80
+ return self
81
+
82
+ def index(self, item):
83
+ return self._items.index(item)
84
+
85
+ def remove(self, item):
86
+ self._check_frozen()
87
+ self._items.remove(item)
88
+
89
+ def clear(self):
90
+ self._check_frozen()
91
+ self._items.clear()
92
+
93
+ def extend(self, items):
94
+ self._check_frozen()
95
+ self._items += list(items)
96
+
97
+ def reverse(self):
98
+ self._check_frozen()
99
+ self._items.reverse()
100
+
101
+ def pop(self, index=-1):
102
+ self._check_frozen()
103
+ return self._items.pop(index)
104
+
105
+ def append(self, item):
106
+ self._check_frozen()
107
+ return self._items.append(item)
108
+
109
+ def count(self, item):
110
+ return self._items.count(item)
111
+
112
+ def __repr__(self):
113
+ return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
114
+ self._items)
115
+
116
+ def __hash__(self):
117
+ if self.frozen:
118
+ return hash(tuple(self._items))
119
+ else:
120
+ raise RuntimeError("Cannot hash unfrozen list.")
121
+
122
+
123
+ MutableSequence.register(FrozenList)
venv/lib/python3.10/site-packages/frozenlist/py.typed ADDED
@@ -0,0 +1 @@
+ Marker
venv/lib/python3.10/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ from importlib.metadata import entry_points
2
+
3
+ from . import _version, caching
4
+ from .callbacks import Callback
5
+ from .compression import available_compressions
6
+ from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
7
+ from .exceptions import FSTimeoutError
8
+ from .mapping import FSMap, get_mapper
9
+ from .registry import (
10
+ available_protocols,
11
+ filesystem,
12
+ get_filesystem_class,
13
+ register_implementation,
14
+ registry,
15
+ )
16
+ from .spec import AbstractFileSystem
17
+
18
+ __version__ = _version.get_versions()["version"]
19
+
20
+ __all__ = [
21
+ "AbstractFileSystem",
22
+ "FSTimeoutError",
23
+ "FSMap",
24
+ "filesystem",
25
+ "register_implementation",
26
+ "get_filesystem_class",
27
+ "get_fs_token_paths",
28
+ "get_mapper",
29
+ "open",
30
+ "open_files",
31
+ "open_local",
32
+ "registry",
33
+ "caching",
34
+ "Callback",
35
+ "available_protocols",
36
+ "available_compressions",
37
+ "url_to_fs",
38
+ ]
39
+
40
+
41
+ def process_entries():
42
+ if entry_points is not None:
43
+ try:
44
+ eps = entry_points()
45
+ except TypeError:
46
+ pass # importlib-metadata < 0.8
47
+ else:
48
+ if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0
49
+ specs = eps.select(group="fsspec.specs")
50
+ else:
51
+ specs = eps.get("fsspec.specs", [])
52
+ registered_names = {}
53
+ for spec in specs:
54
+ err_msg = f"Unable to load filesystem from {spec}"
55
+ name = spec.name
56
+ if name in registered_names:
57
+ continue
58
+ registered_names[name] = True
59
+ register_implementation(
60
+ name,
61
+ spec.value.replace(":", "."),
62
+ errtxt=err_msg,
63
+ # We take our implementations as the ones to overload with if
64
+ # for some reason we encounter some, may be the same, already
65
+ # registered
66
+ clobber=True,
67
+ )
68
+
69
+
70
+ process_entries()
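The process_entries() hook above auto-registers filesystem implementations advertised through the fsspec.specs entry-point group. The same register_implementation call can also be used directly; a hedged sketch follows, in which the "myproto" protocol name and the mypkg.myfs.MyFileSystem dotted path are hypothetical placeholders, not anything shipped in this environment:

```python
from fsspec.registry import register_implementation

# Register a backend lazily by dotted path; clobber=True mirrors the behaviour
# process_entries() uses above when a name may already be registered.
register_implementation(
    "myproto",                      # hypothetical protocol name
    "mypkg.myfs.MyFileSystem",      # hypothetical import path to an AbstractFileSystem subclass
    errtxt="Install mypkg to use the myproto:// protocol",
    clobber=True,
)

# Later, fsspec.filesystem("myproto") would import and instantiate that class on demand.
```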
venv/lib/python3.10/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,21 @@
1
+
2
+ # This file was generated by 'versioneer.py' (0.29) from
3
+ # revision-control system data, or from the parent directory name of an
4
+ # unpacked source archive. Distribution tarballs contain a pre-generated copy
5
+ # of this file.
6
+
7
+ import json
8
+
9
+ version_json = '''
10
+ {
11
+ "date": "2024-03-18T15:33:58-0400",
12
+ "dirty": false,
13
+ "error": null,
14
+ "full-revisionid": "47b445ae4c284a82dd15e0287b1ffc410e8fc470",
15
+ "version": "2024.3.1"
16
+ }
17
+ ''' # END VERSION_JSON
18
+
19
+
20
+ def get_versions():
21
+ return json.loads(version_json)
venv/lib/python3.10/site-packages/fsspec/archive.py ADDED
@@ -0,0 +1,73 @@
1
+ from fsspec import AbstractFileSystem
2
+ from fsspec.utils import tokenize
3
+
4
+
5
+ class AbstractArchiveFileSystem(AbstractFileSystem):
6
+ """
7
+ A generic superclass for implementing Archive-based filesystems.
8
+
9
+ Currently, it is shared amongst
10
+ :class:`~fsspec.implementations.zip.ZipFileSystem`,
11
+ :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
12
+ :class:`~fsspec.implementations.tar.TarFileSystem`.
13
+ """
14
+
15
+ def __str__(self):
16
+ return f"<Archive-like object {type(self).__name__} at {id(self)}>"
17
+
18
+ __repr__ = __str__
19
+
20
+ def ukey(self, path):
21
+ return tokenize(path, self.fo, self.protocol)
22
+
23
+ def _all_dirnames(self, paths):
24
+ """Returns *all* directory names for each path in paths, including intermediate
25
+ ones.
26
+
27
+ Parameters
28
+ ----------
29
+ paths: Iterable of path strings
30
+ """
31
+ if len(paths) == 0:
32
+ return set()
33
+
34
+ dirnames = {self._parent(path) for path in paths} - {self.root_marker}
35
+ return dirnames | self._all_dirnames(dirnames)
36
+
37
+ def info(self, path, **kwargs):
38
+ self._get_dirs()
39
+ path = self._strip_protocol(path)
40
+ if path in {"", "/"} and self.dir_cache:
41
+ return {"name": "", "type": "directory", "size": 0}
42
+ if path in self.dir_cache:
43
+ return self.dir_cache[path]
44
+ elif path + "/" in self.dir_cache:
45
+ return self.dir_cache[path + "/"]
46
+ else:
47
+ raise FileNotFoundError(path)
48
+
49
+ def ls(self, path, detail=True, **kwargs):
50
+ self._get_dirs()
51
+ paths = {}
52
+ for p, f in self.dir_cache.items():
53
+ p = p.rstrip("/")
54
+ if "/" in p:
55
+ root = p.rsplit("/", 1)[0]
56
+ else:
57
+ root = ""
58
+ if root == path.rstrip("/"):
59
+ paths[p] = f
60
+ elif all(
61
+ (a == b)
62
+ for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
63
+ ):
64
+ # root directory entry
65
+ ppath = p.rstrip("/").split("/", 1)[0]
66
+ if ppath not in paths:
67
+ out = {"name": ppath, "size": 0, "type": "directory"}
68
+ paths[ppath] = out
69
+ if detail:
70
+ out = sorted(paths.values(), key=lambda _: _["name"])
71
+ return out
72
+ else:
73
+ return sorted(paths)
venv/lib/python3.10/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1096 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import asyncio.events
3
+ import functools
4
+ import inspect
5
+ import io
6
+ import numbers
7
+ import os
8
+ import re
9
+ import threading
10
+ from contextlib import contextmanager
11
+ from glob import has_magic
12
+ from typing import TYPE_CHECKING, Iterable
13
+
14
+ from .callbacks import DEFAULT_CALLBACK
15
+ from .exceptions import FSTimeoutError
16
+ from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
17
+ from .spec import AbstractBufferedFile, AbstractFileSystem
18
+ from .utils import glob_translate, is_exception, other_paths
19
+
20
+ private = re.compile("_[^_]")
21
+ iothread = [None] # dedicated fsspec IO thread
22
+ loop = [None] # global event loop for any non-async instance
23
+ _lock = None # global lock placeholder
24
+ get_running_loop = asyncio.get_running_loop
25
+
26
+
27
+ def get_lock():
28
+ """Allocate or return a threading lock.
29
+
30
+ The lock is allocated on first use to allow setting one lock per forked process.
31
+ """
32
+ global _lock
33
+ if not _lock:
34
+ _lock = threading.Lock()
35
+ return _lock
36
+
37
+
38
+ def reset_lock():
39
+ """Reset the global lock.
40
+
41
+ This should be called only on the init of a forked process to reset the lock to
42
+ None, enabling the new forked process to get a new lock.
43
+ """
44
+ global _lock
45
+
46
+ iothread[0] = None
47
+ loop[0] = None
48
+ _lock = None
49
+
50
+
51
+ async def _runner(event, coro, result, timeout=None):
52
+ timeout = timeout if timeout else None # convert 0 or 0.0 to None
53
+ if timeout is not None:
54
+ coro = asyncio.wait_for(coro, timeout=timeout)
55
+ try:
56
+ result[0] = await coro
57
+ except Exception as ex:
58
+ result[0] = ex
59
+ finally:
60
+ event.set()
61
+
62
+
63
+ def sync(loop, func, *args, timeout=None, **kwargs):
64
+ """
65
+ Make loop run coroutine until it returns. Runs in other thread
66
+
67
+ Examples
68
+ --------
69
+ >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
70
+ timeout=timeout, **kwargs)
71
+ """
72
+ timeout = timeout if timeout else None # convert 0 or 0.0 to None
73
+ # NB: if the loop is not running *yet*, it is OK to submit work
74
+ # and we will wait for it
75
+ if loop is None or loop.is_closed():
76
+ raise RuntimeError("Loop is not running")
77
+ try:
78
+ loop0 = asyncio.events.get_running_loop()
79
+ if loop0 is loop:
80
+ raise NotImplementedError("Calling sync() from within a running loop")
81
+ except NotImplementedError:
82
+ raise
83
+ except RuntimeError:
84
+ pass
85
+ coro = func(*args, **kwargs)
86
+ result = [None]
87
+ event = threading.Event()
88
+ asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
89
+ while True:
90
+ # this loops allows thread to get interrupted
91
+ if event.wait(1):
92
+ break
93
+ if timeout is not None:
94
+ timeout -= 1
95
+ if timeout < 0:
96
+ raise FSTimeoutError
97
+
98
+ return_result = result[0]
99
+ if isinstance(return_result, asyncio.TimeoutError):
100
+ # suppress asyncio.TimeoutError, raise FSTimeoutError
101
+ raise FSTimeoutError from return_result
102
+ elif isinstance(return_result, BaseException):
103
+ raise return_result
104
+ else:
105
+ return return_result
106
+
107
+
108
+ def sync_wrapper(func, obj=None):
109
+ """Given a function, make so can be called in blocking contexts
110
+
111
+ Leave obj=None if defining within a class. Pass the instance if attaching
112
+ as an attribute of the instance.
113
+ """
114
+
115
+ @functools.wraps(func)
116
+ def wrapper(*args, **kwargs):
117
+ self = obj or args[0]
118
+ return sync(self.loop, func, *args, **kwargs)
119
+
120
+ return wrapper
121
+
122
+
123
+ @contextmanager
124
+ def _selector_policy():
125
+ original_policy = asyncio.get_event_loop_policy()
126
+ try:
127
+ if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
128
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
129
+
130
+ yield
131
+ finally:
132
+ asyncio.set_event_loop_policy(original_policy)
133
+
134
+
135
+ def get_loop():
136
+ """Create or return the default fsspec IO loop
137
+
138
+ The loop will be running on a separate thread.
139
+ """
140
+ if loop[0] is None:
141
+ with get_lock():
142
+ # repeat the check just in case the loop got filled between the
143
+ # previous two calls from another thread
144
+ if loop[0] is None:
145
+ with _selector_policy():
146
+ loop[0] = asyncio.new_event_loop()
147
+ th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
148
+ th.daemon = True
149
+ th.start()
150
+ iothread[0] = th
151
+ return loop[0]
152
+
153
+
154
+ if TYPE_CHECKING:
155
+ import resource
156
+
157
+ ResourceError = resource.error
158
+ else:
159
+ try:
160
+ import resource
161
+ except ImportError:
162
+ resource = None
163
+ ResourceError = OSError
164
+ else:
165
+ ResourceError = getattr(resource, "error", OSError)
166
+
167
+ _DEFAULT_BATCH_SIZE = 128
168
+ _NOFILES_DEFAULT_BATCH_SIZE = 1280
169
+
170
+
171
+ def _get_batch_size(nofiles=False):
172
+ from fsspec.config import conf
173
+
174
+ if nofiles:
175
+ if "nofiles_gather_batch_size" in conf:
176
+ return conf["nofiles_gather_batch_size"]
177
+ else:
178
+ if "gather_batch_size" in conf:
179
+ return conf["gather_batch_size"]
180
+ if nofiles:
181
+ return _NOFILES_DEFAULT_BATCH_SIZE
182
+ if resource is None:
183
+ return _DEFAULT_BATCH_SIZE
184
+
185
+ try:
186
+ soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
187
+ except (ImportError, ValueError, ResourceError):
188
+ return _DEFAULT_BATCH_SIZE
189
+
190
+ if soft_limit == resource.RLIM_INFINITY:
191
+ return -1
192
+ else:
193
+ return soft_limit // 8
194
+
195
+
196
+ def running_async() -> bool:
197
+ """Being executed by an event loop?"""
198
+ try:
199
+ asyncio.get_running_loop()
200
+ return True
201
+ except RuntimeError:
202
+ return False
203
+
204
+
205
+ async def _run_coros_in_chunks(
206
+ coros,
207
+ batch_size=None,
208
+ callback=DEFAULT_CALLBACK,
209
+ timeout=None,
210
+ return_exceptions=False,
211
+ nofiles=False,
212
+ ):
213
+ """Run the given coroutines in chunks.
214
+
215
+ Parameters
216
+ ----------
217
+ coros: list of coroutines to run
218
+ batch_size: int or None
219
+ Number of coroutines to submit/wait on simultaneously.
220
+ If -1, then it will not be any throttling. If
221
+ None, it will be inferred from _get_batch_size()
222
+ callback: fsspec.callbacks.Callback instance
223
+ Gets a relative_update when each coroutine completes
224
+ timeout: number or None
225
+ If given, each coroutine times out after this time. Note that, since
226
+ there are multiple batches, the total run time of this function will in
227
+ general be longer
228
+ return_exceptions: bool
229
+ Same meaning as in asyncio.gather
230
+ nofiles: bool
231
+ If inferring the batch_size, does this operation involve local files?
232
+ If yes, you normally expect smaller batches.
233
+ """
234
+
235
+ if batch_size is None:
236
+ batch_size = _get_batch_size(nofiles=nofiles)
237
+
238
+ if batch_size == -1:
239
+ batch_size = len(coros)
240
+
241
+ assert batch_size > 0
242
+
243
+ async def _run_coro(coro, i):
244
+ try:
245
+ return await asyncio.wait_for(coro, timeout=timeout), i
246
+ except Exception as e:
247
+ if not return_exceptions:
248
+ raise
249
+ return e, i
250
+ finally:
251
+ callback.relative_update(1)
252
+
253
+ i = 0
254
+ n = len(coros)
255
+ results = [None] * n
256
+ pending = set()
257
+
258
+ while pending or i < n:
259
+ while len(pending) < batch_size and i < n:
260
+ pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
261
+ i += 1
262
+
263
+ if not pending:
264
+ break
265
+
266
+ done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
267
+ while done:
268
+ result, k = await done.pop()
269
+ results[k] = result
270
+
271
+ return results
272
+
273
+
274
+ # these methods should be implemented as async by any async-able backend
275
+ async_methods = [
276
+ "_ls",
277
+ "_cat_file",
278
+ "_get_file",
279
+ "_put_file",
280
+ "_rm_file",
281
+ "_cp_file",
282
+ "_pipe_file",
283
+ "_expand_path",
284
+ "_info",
285
+ "_isfile",
286
+ "_isdir",
287
+ "_exists",
288
+ "_walk",
289
+ "_glob",
290
+ "_find",
291
+ "_du",
292
+ "_size",
293
+ "_mkdir",
294
+ "_makedirs",
295
+ ]
296
+
297
+
298
+ class AsyncFileSystem(AbstractFileSystem):
299
+ """Async file operations, default implementations
300
+
301
+ Passes bulk operations to asyncio.gather for concurrent operation.
302
+
303
+ Implementations that have concurrent batch operations and/or async methods
304
+ should inherit from this class instead of AbstractFileSystem. Docstrings are
305
+ copied from the un-underscored method in AbstractFileSystem, if not given.
306
+ """
307
+
308
+ # note that methods do not have docstring here; they will be copied
309
+ # for _* methods and inferred for overridden methods.
310
+
311
+ async_impl = True
312
+ mirror_sync_methods = True
313
+ disable_throttling = False
314
+
315
+ def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
316
+ self.asynchronous = asynchronous
317
+ self._pid = os.getpid()
318
+ if not asynchronous:
319
+ self._loop = loop or get_loop()
320
+ else:
321
+ self._loop = None
322
+ self.batch_size = batch_size
323
+ super().__init__(*args, **kwargs)
324
+
325
+ @property
326
+ def loop(self):
327
+ if self._pid != os.getpid():
328
+ raise RuntimeError("This class is not fork-safe")
329
+ return self._loop
330
+
331
+ async def _rm_file(self, path, **kwargs):
332
+ raise NotImplementedError
333
+
334
+ async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
335
+ # TODO: implement on_error
336
+ batch_size = batch_size or self.batch_size
337
+ path = await self._expand_path(path, recursive=recursive)
338
+ return await _run_coros_in_chunks(
339
+ [self._rm_file(p, **kwargs) for p in reversed(path)],
340
+ batch_size=batch_size,
341
+ nofiles=True,
342
+ )
343
+
344
+ async def _cp_file(self, path1, path2, **kwargs):
345
+ raise NotImplementedError
346
+
347
+ async def _copy(
348
+ self,
349
+ path1,
350
+ path2,
351
+ recursive=False,
352
+ on_error=None,
353
+ maxdepth=None,
354
+ batch_size=None,
355
+ **kwargs,
356
+ ):
357
+ if on_error is None and recursive:
358
+ on_error = "ignore"
359
+ elif on_error is None:
360
+ on_error = "raise"
361
+
362
+ if isinstance(path1, list) and isinstance(path2, list):
363
+ # No need to expand paths when both source and destination
364
+ # are provided as lists
365
+ paths1 = path1
366
+ paths2 = path2
367
+ else:
368
+ source_is_str = isinstance(path1, str)
369
+ paths1 = await self._expand_path(
370
+ path1, maxdepth=maxdepth, recursive=recursive
371
+ )
372
+ if source_is_str and (not recursive or maxdepth is not None):
373
+ # Non-recursive glob does not copy directories
374
+ paths1 = [
375
+ p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
376
+ ]
377
+ if not paths1:
378
+ return
379
+
380
+ source_is_file = len(paths1) == 1
381
+ dest_is_dir = isinstance(path2, str) and (
382
+ trailing_sep(path2) or await self._isdir(path2)
383
+ )
384
+
385
+ exists = source_is_str and (
386
+ (has_magic(path1) and source_is_file)
387
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
388
+ )
389
+ paths2 = other_paths(
390
+ paths1,
391
+ path2,
392
+ exists=exists,
393
+ flatten=not source_is_str,
394
+ )
395
+
396
+ batch_size = batch_size or self.batch_size
397
+ coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
398
+ result = await _run_coros_in_chunks(
399
+ coros, batch_size=batch_size, return_exceptions=True, nofiles=True
400
+ )
401
+
402
+ for ex in filter(is_exception, result):
403
+ if on_error == "ignore" and isinstance(ex, FileNotFoundError):
404
+ continue
405
+ raise ex
406
+
407
+ async def _pipe_file(self, path, value, **kwargs):
408
+ raise NotImplementedError
409
+
410
+ async def _pipe(self, path, value=None, batch_size=None, **kwargs):
411
+ if isinstance(path, str):
412
+ path = {path: value}
413
+ batch_size = batch_size or self.batch_size
414
+ return await _run_coros_in_chunks(
415
+ [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
416
+ batch_size=batch_size,
417
+ nofiles=True,
418
+ )
419
+
420
+ async def _process_limits(self, url, start, end):
421
+ """Helper for "Range"-based _cat_file"""
422
+ size = None
423
+ suff = False
424
+ if start is not None and start < 0:
425
+ # if start is negative and end None, end is the "suffix length"
426
+ if end is None:
427
+ end = -start
428
+ start = ""
429
+ suff = True
430
+ else:
431
+ size = size or (await self._info(url))["size"]
432
+ start = size + start
433
+ elif start is None:
434
+ start = 0
435
+ if not suff:
436
+ if end is not None and end < 0:
437
+ if start is not None:
438
+ size = size or (await self._info(url))["size"]
439
+ end = size + end
440
+ elif end is None:
441
+ end = ""
442
+ if isinstance(end, numbers.Integral):
443
+ end -= 1 # bytes range is inclusive
444
+ return f"bytes={start}-{end}"
445
+
446
+ async def _cat_file(self, path, start=None, end=None, **kwargs):
447
+ raise NotImplementedError
448
+
449
+ async def _cat(
450
+ self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
451
+ ):
452
+ paths = await self._expand_path(path, recursive=recursive)
453
+ coros = [self._cat_file(path, **kwargs) for path in paths]
454
+ batch_size = batch_size or self.batch_size
455
+ out = await _run_coros_in_chunks(
456
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
457
+ )
458
+ if on_error == "raise":
459
+ ex = next(filter(is_exception, out), False)
460
+ if ex:
461
+ raise ex
462
+ if (
463
+ len(paths) > 1
464
+ or isinstance(path, list)
465
+ or paths[0] != self._strip_protocol(path)
466
+ ):
467
+ return {
468
+ k: v
469
+ for k, v in zip(paths, out)
470
+ if on_error != "omit" or not is_exception(v)
471
+ }
472
+ else:
473
+ return out[0]
474
+
475
+ async def _cat_ranges(
476
+ self,
477
+ paths,
478
+ starts,
479
+ ends,
480
+ max_gap=None,
481
+ batch_size=None,
482
+ on_error="return",
483
+ **kwargs,
484
+ ):
485
+ """Get the contents of byte ranges from one or more files
486
+
487
+ Parameters
488
+ ----------
489
+ paths: list
490
+ A list of of filepaths on this filesystems
491
+ starts, ends: int or list
492
+ Bytes limits of the read. If using a single int, the same value will be
493
+ used to read all the specified files.
494
+ """
495
+ # TODO: on_error
496
+ if max_gap is not None:
497
+ # use utils.merge_offset_ranges
498
+ raise NotImplementedError
499
+ if not isinstance(paths, list):
500
+ raise TypeError
501
+ if not isinstance(starts, Iterable):
502
+ starts = [starts] * len(paths)
503
+ if not isinstance(ends, Iterable):
504
+ ends = [ends] * len(paths)
505
+ if len(starts) != len(paths) or len(ends) != len(paths):
506
+ raise ValueError
507
+ coros = [
508
+ self._cat_file(p, start=s, end=e, **kwargs)
509
+ for p, s, e in zip(paths, starts, ends)
510
+ ]
511
+ batch_size = batch_size or self.batch_size
512
+ return await _run_coros_in_chunks(
513
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
514
+ )
515
+
516
+ async def _put_file(self, lpath, rpath, **kwargs):
517
+ raise NotImplementedError
518
+
519
+ async def _put(
520
+ self,
521
+ lpath,
522
+ rpath,
523
+ recursive=False,
524
+ callback=DEFAULT_CALLBACK,
525
+ batch_size=None,
526
+ maxdepth=None,
527
+ **kwargs,
528
+ ):
529
+ """Copy file(s) from local.
530
+
531
+ Copies a specific file or tree of files (if recursive=True). If rpath
532
+ ends with a "/", it will be assumed to be a directory, and target files
533
+ will go within.
534
+
535
+ The put_file method will be called concurrently on a batch of files. The
536
+ batch_size option can configure the amount of futures that can be executed
537
+ at the same time. If it is -1, then all the files will be uploaded concurrently.
538
+ The default can be set for this instance by passing "batch_size" in the
539
+ constructor, or for all instances by setting the "gather_batch_size" key
540
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
541
+ """
542
+ if isinstance(lpath, list) and isinstance(rpath, list):
543
+ # No need to expand paths when both source and destination
544
+ # are provided as lists
545
+ rpaths = rpath
546
+ lpaths = lpath
547
+ else:
548
+ source_is_str = isinstance(lpath, str)
549
+ if source_is_str:
550
+ lpath = make_path_posix(lpath)
551
+ fs = LocalFileSystem()
552
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
553
+ if source_is_str and (not recursive or maxdepth is not None):
554
+ # Non-recursive glob does not copy directories
555
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
556
+ if not lpaths:
557
+ return
558
+
559
+ source_is_file = len(lpaths) == 1
560
+ dest_is_dir = isinstance(rpath, str) and (
561
+ trailing_sep(rpath) or await self._isdir(rpath)
562
+ )
563
+
564
+ rpath = self._strip_protocol(rpath)
565
+ exists = source_is_str and (
566
+ (has_magic(lpath) and source_is_file)
567
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
568
+ )
569
+ rpaths = other_paths(
570
+ lpaths,
571
+ rpath,
572
+ exists=exists,
573
+ flatten=not source_is_str,
574
+ )
575
+
576
+ is_dir = {l: os.path.isdir(l) for l in lpaths}
577
+ rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
578
+ file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
579
+
580
+ await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
581
+ batch_size = batch_size or self.batch_size
582
+
583
+ coros = []
584
+ callback.set_size(len(file_pairs))
585
+ for lfile, rfile in file_pairs:
586
+ put_file = callback.branch_coro(self._put_file)
587
+ coros.append(put_file(lfile, rfile, **kwargs))
588
+
589
+ return await _run_coros_in_chunks(
590
+ coros, batch_size=batch_size, callback=callback
591
+ )
592
+
593
+ async def _get_file(self, rpath, lpath, **kwargs):
594
+ raise NotImplementedError
595
+
596
+ async def _get(
597
+ self,
598
+ rpath,
599
+ lpath,
600
+ recursive=False,
601
+ callback=DEFAULT_CALLBACK,
602
+ maxdepth=None,
603
+ **kwargs,
604
+ ):
605
+ """Copy file(s) to local.
606
+
607
+ Copies a specific file or tree of files (if recursive=True). If lpath
608
+ ends with a "/", it will be assumed to be a directory, and target files
609
+ will go within. Can submit a list of paths, which may be glob-patterns
610
+ and will be expanded.
611
+
612
+ The get_file method will be called concurrently on a batch of files. The
613
+ batch_size option can configure the amount of futures that can be executed
614
+ at the same time. If it is -1, then all the files will be uploaded concurrently.
615
+ The default can be set for this instance by passing "batch_size" in the
616
+ constructor, or for all instances by setting the "gather_batch_size" key
617
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
618
+ """
619
+ if isinstance(lpath, list) and isinstance(rpath, list):
620
+ # No need to expand paths when both source and destination
621
+ # are provided as lists
622
+ rpaths = rpath
623
+ lpaths = lpath
624
+ else:
625
+ source_is_str = isinstance(rpath, str)
626
+ # First check for rpath trailing slash as _strip_protocol removes it.
627
+ source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
628
+ rpath = self._strip_protocol(rpath)
629
+ rpaths = await self._expand_path(
630
+ rpath, recursive=recursive, maxdepth=maxdepth
631
+ )
632
+ if source_is_str and (not recursive or maxdepth is not None):
633
+ # Non-recursive glob does not copy directories
634
+ rpaths = [
635
+ p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
636
+ ]
637
+ if not rpaths:
638
+ return
639
+
640
+ lpath = make_path_posix(lpath)
641
+ source_is_file = len(rpaths) == 1
642
+ dest_is_dir = isinstance(lpath, str) and (
643
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
644
+ )
645
+
646
+ exists = source_is_str and (
647
+ (has_magic(rpath) and source_is_file)
648
+ or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
649
+ )
650
+ lpaths = other_paths(
651
+ rpaths,
652
+ lpath,
653
+ exists=exists,
654
+ flatten=not source_is_str,
655
+ )
656
+
657
+ [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
658
+ batch_size = kwargs.pop("batch_size", self.batch_size)
659
+
660
+ coros = []
661
+ callback.set_size(len(lpaths))
662
+ for lpath, rpath in zip(lpaths, rpaths):
663
+ get_file = callback.branch_coro(self._get_file)
664
+ coros.append(get_file(rpath, lpath, **kwargs))
665
+ return await _run_coros_in_chunks(
666
+ coros, batch_size=batch_size, callback=callback
667
+ )
668
+
669
+ async def _isfile(self, path):
670
+ try:
671
+ return (await self._info(path))["type"] == "file"
672
+ except: # noqa: E722
673
+ return False
674
+
675
+ async def _isdir(self, path):
676
+ try:
677
+ return (await self._info(path))["type"] == "directory"
678
+ except OSError:
679
+ return False
680
+
681
+ async def _size(self, path):
682
+ return (await self._info(path)).get("size", None)
683
+
684
+ async def _sizes(self, paths, batch_size=None):
685
+ batch_size = batch_size or self.batch_size
686
+ return await _run_coros_in_chunks(
687
+ [self._size(p) for p in paths], batch_size=batch_size
688
+ )
689
+
690
+ async def _exists(self, path, **kwargs):
691
+ try:
692
+ await self._info(path, **kwargs)
693
+ return True
694
+ except FileNotFoundError:
695
+ return False
696
+
697
+ async def _info(self, path, **kwargs):
698
+ raise NotImplementedError
699
+
700
+ async def _ls(self, path, detail=True, **kwargs):
701
+ raise NotImplementedError
702
+
703
+ async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
704
+ if maxdepth is not None and maxdepth < 1:
705
+ raise ValueError("maxdepth must be at least 1")
706
+
707
+ path = self._strip_protocol(path)
708
+ full_dirs = {}
709
+ dirs = {}
710
+ files = {}
711
+
712
+ detail = kwargs.pop("detail", False)
713
+ try:
714
+ listing = await self._ls(path, detail=True, **kwargs)
715
+ except (FileNotFoundError, OSError) as e:
716
+ if on_error == "raise":
717
+ raise
718
+ elif callable(on_error):
719
+ on_error(e)
720
+ if detail:
721
+ yield path, {}, {}
722
+ else:
723
+ yield path, [], []
724
+ return
725
+
726
+ for info in listing:
727
+ # each info name must be at least [path]/part , but here
728
+ # we check also for names like [path]/part/
729
+ pathname = info["name"].rstrip("/")
730
+ name = pathname.rsplit("/", 1)[-1]
731
+ if info["type"] == "directory" and pathname != path:
732
+ # do not include "self" path
733
+ full_dirs[name] = pathname
734
+ dirs[name] = info
735
+ elif pathname == path:
736
+ # file-like with same name as give path
737
+ files[""] = info
738
+ else:
739
+ files[name] = info
740
+
741
+ if detail:
742
+ yield path, dirs, files
743
+ else:
744
+ yield path, list(dirs), list(files)
745
+
746
+ if maxdepth is not None:
747
+ maxdepth -= 1
748
+ if maxdepth < 1:
749
+ return
750
+
751
+ for d in dirs:
752
+ async for _ in self._walk(
753
+ full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
754
+ ):
755
+ yield _
756
+
757
+ async def _glob(self, path, maxdepth=None, **kwargs):
758
+ if maxdepth is not None and maxdepth < 1:
759
+ raise ValueError("maxdepth must be at least 1")
760
+
761
+ import re
762
+
763
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
764
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
765
+ path = self._strip_protocol(path)
766
+ append_slash_to_dirname = ends_with_sep or path.endswith(
767
+ tuple(sep + "**" for sep in seps)
768
+ )
769
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
770
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
771
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
772
+
773
+ min_idx = min(idx_star, idx_qmark, idx_brace)
774
+
775
+ detail = kwargs.pop("detail", False)
776
+
777
+ if not has_magic(path):
778
+ if await self._exists(path, **kwargs):
779
+ if not detail:
780
+ return [path]
781
+ else:
782
+ return {path: await self._info(path, **kwargs)}
783
+ else:
784
+ if not detail:
785
+ return [] # glob of non-existent returns empty
786
+ else:
787
+ return {}
788
+ elif "/" in path[:min_idx]:
789
+ min_idx = path[:min_idx].rindex("/")
790
+ root = path[: min_idx + 1]
791
+ depth = path[min_idx + 1 :].count("/") + 1
792
+ else:
793
+ root = ""
794
+ depth = path[min_idx + 1 :].count("/") + 1
795
+
796
+ if "**" in path:
797
+ if maxdepth is not None:
798
+ idx_double_stars = path.find("**")
799
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
800
+ depth = depth - depth_double_stars + maxdepth
801
+ else:
802
+ depth = None
803
+
804
+ allpaths = await self._find(
805
+ root, maxdepth=depth, withdirs=True, detail=True, **kwargs
806
+ )
807
+
808
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
809
+ pattern = re.compile(pattern)
810
+
811
+ out = {
812
+ p: info
813
+ for p, info in sorted(allpaths.items())
814
+ if pattern.match(
815
+ (
816
+ p + "/"
817
+ if append_slash_to_dirname and info["type"] == "directory"
818
+ else p
819
+ )
820
+ )
821
+ }
822
+
823
+ if detail:
824
+ return out
825
+ else:
826
+ return list(out)
827
+
828
+ async def _du(self, path, total=True, maxdepth=None, **kwargs):
829
+ sizes = {}
830
+ # async for?
831
+ for f in await self._find(path, maxdepth=maxdepth, **kwargs):
832
+ info = await self._info(f)
833
+ sizes[info["name"]] = info["size"]
834
+ if total:
835
+ return sum(sizes.values())
836
+ else:
837
+ return sizes
838
+
839
+ async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
840
+ path = self._strip_protocol(path)
841
+ out = {}
842
+ detail = kwargs.pop("detail", False)
843
+
844
+ # Add the root directory if withdirs is requested
845
+ # This is needed for posix glob compliance
846
+ if withdirs and path != "" and await self._isdir(path):
847
+ out[path] = await self._info(path)
848
+
849
+ # async for?
850
+ async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
851
+ if withdirs:
852
+ files.update(dirs)
853
+ out.update({info["name"]: info for name, info in files.items()})
854
+ if not out and (await self._isfile(path)):
855
+ # walk works on directories, but find should also return [path]
856
+ # when path happens to be a file
857
+ out[path] = {}
858
+ names = sorted(out)
859
+ if not detail:
860
+ return names
861
+ else:
862
+ return {name: out[name] for name in names}
863
+
864
+ async def _expand_path(self, path, recursive=False, maxdepth=None):
865
+ if maxdepth is not None and maxdepth < 1:
866
+ raise ValueError("maxdepth must be at least 1")
867
+
868
+ if isinstance(path, str):
869
+ out = await self._expand_path([path], recursive, maxdepth)
870
+ else:
871
+ out = set()
872
+ path = [self._strip_protocol(p) for p in path]
873
+ for p in path: # can gather here
874
+ if has_magic(p):
875
+ bit = set(await self._glob(p, maxdepth=maxdepth))
876
+ out |= bit
877
+ if recursive:
878
+ # glob call above expanded one depth so if maxdepth is defined
879
+ # then decrement it in expand_path call below. If it is zero
880
+ # after decrementing then avoid expand_path call.
881
+ if maxdepth is not None and maxdepth <= 1:
882
+ continue
883
+ out |= set(
884
+ await self._expand_path(
885
+ list(bit),
886
+ recursive=recursive,
887
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
888
+ )
889
+ )
890
+ continue
891
+ elif recursive:
892
+ rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
893
+ out |= rec
894
+ if p not in out and (recursive is False or (await self._exists(p))):
895
+ # should only check once, for the root
896
+ out.add(p)
897
+ if not out:
898
+ raise FileNotFoundError(path)
899
+ return sorted(out)
900
+
901
+ async def _mkdir(self, path, create_parents=True, **kwargs):
902
+ pass # not necessary to implement, may not have directories
903
+
904
+ async def _makedirs(self, path, exist_ok=False):
905
+ pass # not necessary to implement, may not have directories
906
+
907
+ async def open_async(self, path, mode="rb", **kwargs):
908
+ if "b" not in mode or kwargs.get("compression"):
909
+ raise ValueError
910
+ raise NotImplementedError
911
+
912
+
913
+ def mirror_sync_methods(obj):
914
+ """Populate sync and async methods for obj
915
+
916
+ For each method will create a sync version if the name refers to an async method
917
+ (coroutine) and there is no override in the child class; will create an async
918
+ method for the corresponding sync method if there is no implementation.
919
+
920
+ Uses the methods specified in
921
+ - async_methods: the set that an implementation is expected to provide
922
+ - default_async_methods: that can be derived from their sync version in
923
+ AbstractFileSystem
924
+ - AsyncFileSystem: async-specific default coroutines
925
+ """
926
+ from fsspec import AbstractFileSystem
927
+
928
+ for method in async_methods + dir(AsyncFileSystem):
929
+ if not method.startswith("_"):
930
+ continue
931
+ smethod = method[1:]
932
+ if private.match(method):
933
+ isco = inspect.iscoroutinefunction(getattr(obj, method, None))
934
+ unsync = getattr(getattr(obj, smethod, False), "__func__", None)
935
+ is_default = unsync is getattr(AbstractFileSystem, smethod, "")
936
+ if isco and is_default:
937
+ mth = sync_wrapper(getattr(obj, method), obj=obj)
938
+ setattr(obj, smethod, mth)
939
+ if not mth.__doc__:
940
+ mth.__doc__ = getattr(
941
+ getattr(AbstractFileSystem, smethod, None), "__doc__", ""
942
+ )
943
+
944
+
945
+ class FSSpecCoroutineCancel(Exception):
946
+ pass
947
+
948
+
949
+ def _dump_running_tasks(
950
+ printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
951
+ ):
952
+ import traceback
953
+
954
+ tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
955
+ if printout:
956
+ [task.print_stack() for task in tasks]
957
+ out = [
958
+ {
959
+ "locals": task._coro.cr_frame.f_locals,
960
+ "file": task._coro.cr_frame.f_code.co_filename,
961
+ "firstline": task._coro.cr_frame.f_code.co_firstlineno,
962
+ "linelo": task._coro.cr_frame.f_lineno,
963
+ "stack": traceback.format_stack(task._coro.cr_frame),
964
+ "task": task if with_task else None,
965
+ }
966
+ for task in tasks
967
+ ]
968
+ if cancel:
969
+ for t in tasks:
970
+ cbs = t._callbacks
971
+ t.cancel()
972
+ asyncio.futures.Future.set_exception(t, exc)
973
+ asyncio.futures.Future.cancel(t)
974
+ [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
975
+ try:
976
+ t._coro.throw(exc) # exits coro, unless explicitly handled
977
+ except exc:
978
+ pass
979
+ return out
980
+
981
+
982
+ class AbstractAsyncStreamedFile(AbstractBufferedFile):
983
+ # no read buffering, and always auto-commit
984
+ # TODO: readahead might still be useful here, but needs async version
985
+
986
+ async def read(self, length=-1):
987
+ """
988
+ Return data from cache, or fetch pieces as necessary
989
+
990
+ Parameters
991
+ ----------
992
+ length: int (-1)
993
+ Number of bytes to read; if <0, all remaining bytes.
994
+ """
995
+ length = -1 if length is None else int(length)
996
+ if self.mode != "rb":
997
+ raise ValueError("File not in read mode")
998
+ if length < 0:
999
+ length = self.size - self.loc
1000
+ if self.closed:
1001
+ raise ValueError("I/O operation on closed file.")
1002
+ if length == 0:
1003
+ # don't even bother calling fetch
1004
+ return b""
1005
+ out = await self._fetch_range(self.loc, self.loc + length)
1006
+ self.loc += len(out)
1007
+ return out
1008
+
1009
+ async def write(self, data):
1010
+ """
1011
+ Write data to buffer.
1012
+
1013
+ Buffer only sent on flush() or if buffer is greater than
1014
+ or equal to blocksize.
1015
+
1016
+ Parameters
1017
+ ----------
1018
+ data: bytes
1019
+ Set of bytes to be written.
1020
+ """
1021
+ if self.mode not in {"wb", "ab"}:
1022
+ raise ValueError("File not in write mode")
1023
+ if self.closed:
1024
+ raise ValueError("I/O operation on closed file.")
1025
+ if self.forced:
1026
+ raise ValueError("This file has been force-flushed, can only close")
1027
+ out = self.buffer.write(data)
1028
+ self.loc += out
1029
+ if self.buffer.tell() >= self.blocksize:
1030
+ await self.flush()
1031
+ return out
1032
+
1033
+ async def close(self):
1034
+ """Close file
1035
+
1036
+ Finalizes writes, discards cache
1037
+ """
1038
+ if getattr(self, "_unclosable", False):
1039
+ return
1040
+ if self.closed:
1041
+ return
1042
+ if self.mode == "rb":
1043
+ self.cache = None
1044
+ else:
1045
+ if not self.forced:
1046
+ await self.flush(force=True)
1047
+
1048
+ if self.fs is not None:
1049
+ self.fs.invalidate_cache(self.path)
1050
+ self.fs.invalidate_cache(self.fs._parent(self.path))
1051
+
1052
+ self.closed = True
1053
+
1054
+ async def flush(self, force=False):
1055
+ if self.closed:
1056
+ raise ValueError("Flush on closed file")
1057
+ if force and self.forced:
1058
+ raise ValueError("Force flush cannot be called more than once")
1059
+ if force:
1060
+ self.forced = True
1061
+
1062
+ if self.mode not in {"wb", "ab"}:
1063
+ # no-op to flush on read-mode
1064
+ return
1065
+
1066
+ if not force and self.buffer.tell() < self.blocksize:
1067
+ # Defer write on small block
1068
+ return
1069
+
1070
+ if self.offset is None:
1071
+ # Initialize a multipart upload
1072
+ self.offset = 0
1073
+ try:
1074
+ await self._initiate_upload()
1075
+ except: # noqa: E722
1076
+ self.closed = True
1077
+ raise
1078
+
1079
+ if await self._upload_chunk(final=force) is not False:
1080
+ self.offset += self.buffer.seek(0, 2)
1081
+ self.buffer = io.BytesIO()
1082
+
1083
+ async def __aenter__(self):
1084
+ return self
1085
+
1086
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
1087
+ await self.close()
1088
+
1089
+ async def _fetch_range(self, start, end):
1090
+ raise NotImplementedError
1091
+
1092
+ async def _initiate_upload(self):
1093
+ pass
1094
+
1095
+ async def _upload_chunk(self, final=False):
1096
+ raise NotImplementedError
venv/lib/python3.10/site-packages/fsspec/caching.py ADDED
@@ -0,0 +1,881 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import functools
5
+ import logging
6
+ import math
7
+ import os
8
+ import threading
9
+ import warnings
10
+ from concurrent.futures import Future, ThreadPoolExecutor
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ Any,
14
+ Callable,
15
+ ClassVar,
16
+ Generic,
17
+ NamedTuple,
18
+ OrderedDict,
19
+ TypeVar,
20
+ )
21
+
22
+ if TYPE_CHECKING:
23
+ import mmap
24
+
25
+ from typing_extensions import ParamSpec
26
+
27
+ P = ParamSpec("P")
28
+ else:
29
+ P = TypeVar("P")
30
+
31
+ T = TypeVar("T")
32
+
33
+
34
+ logger = logging.getLogger("fsspec")
35
+
36
+ Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes
37
+
38
+
39
+ class BaseCache:
40
+ """Pass-though cache: doesn't keep anything, calls every time
41
+
42
+ Acts as base class for other cachers
43
+
44
+ Parameters
45
+ ----------
46
+ blocksize: int
47
+ How far to read ahead in numbers of bytes
48
+ fetcher: func
49
+ Function of the form f(start, end) which gets bytes from remote as
50
+ specified
51
+ size: int
52
+ How big this file is
53
+ """
54
+
55
+ name: ClassVar[str] = "none"
56
+
57
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
58
+ self.blocksize = blocksize
59
+ self.fetcher = fetcher
60
+ self.size = size
61
+
62
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
63
+ if start is None:
64
+ start = 0
65
+ if stop is None:
66
+ stop = self.size
67
+ if start >= self.size or start >= stop:
68
+ return b""
69
+ return self.fetcher(start, stop)
70
+
71
+
72
+ class MMapCache(BaseCache):
73
+ """memory-mapped sparse file cache
74
+
75
+ Opens temporary file, which is filled blocks-wise when data is requested.
76
+ Ensure there is enough disc space in the temporary location.
77
+
78
+ This cache method might only work on posix
79
+ """
80
+
81
+ name = "mmap"
82
+
83
+ def __init__(
84
+ self,
85
+ blocksize: int,
86
+ fetcher: Fetcher,
87
+ size: int,
88
+ location: str | None = None,
89
+ blocks: set[int] | None = None,
90
+ ) -> None:
91
+ super().__init__(blocksize, fetcher, size)
92
+ self.blocks = set() if blocks is None else blocks
93
+ self.location = location
94
+ self.cache = self._makefile()
95
+
96
+ def _makefile(self) -> mmap.mmap | bytearray:
97
+ import mmap
98
+ import tempfile
99
+
100
+ if self.size == 0:
101
+ return bytearray()
102
+
103
+ # posix version
104
+ if self.location is None or not os.path.exists(self.location):
105
+ if self.location is None:
106
+ fd = tempfile.TemporaryFile()
107
+ self.blocks = set()
108
+ else:
109
+ fd = open(self.location, "wb+")
110
+ fd.seek(self.size - 1)
111
+ fd.write(b"1")
112
+ fd.flush()
113
+ else:
114
+ fd = open(self.location, "r+b")
115
+
116
+ return mmap.mmap(fd.fileno(), self.size)
117
+
118
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
119
+ logger.debug(f"MMap cache fetching {start}-{end}")
120
+ if start is None:
121
+ start = 0
122
+ if end is None:
123
+ end = self.size
124
+ if start >= self.size or start >= end:
125
+ return b""
126
+ start_block = start // self.blocksize
127
+ end_block = end // self.blocksize
128
+ need = [i for i in range(start_block, end_block + 1) if i not in self.blocks]
129
+ while need:
130
+ # TODO: not a for loop so we can consolidate blocks later to
131
+ # make fewer fetch calls; this could be parallel
132
+ i = need.pop(0)
133
+ sstart = i * self.blocksize
134
+ send = min(sstart + self.blocksize, self.size)
135
+ logger.debug(f"MMap get block #{i} ({sstart}-{send}")
136
+ self.cache[sstart:send] = self.fetcher(sstart, send)
137
+ self.blocks.add(i)
138
+
139
+ return self.cache[start:end]
140
+
141
+ def __getstate__(self) -> dict[str, Any]:
142
+ state = self.__dict__.copy()
143
+ # Remove the unpicklable entries.
144
+ del state["cache"]
145
+ return state
146
+
147
+ def __setstate__(self, state: dict[str, Any]) -> None:
148
+ # Restore instance attributes
149
+ self.__dict__.update(state)
150
+ self.cache = self._makefile()
151
+
152
+
153
+ class ReadAheadCache(BaseCache):
154
+ """Cache which reads only when we get beyond a block of data
155
+
156
+ This is a much simpler version of BytesCache, and does not attempt to
157
+ fill holes in the cache or keep fragments alive. It is best suited to
158
+ many small reads in a sequential order (e.g., reading lines from a file).
159
+ """
160
+
161
+ name = "readahead"
162
+
163
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
164
+ super().__init__(blocksize, fetcher, size)
165
+ self.cache = b""
166
+ self.start = 0
167
+ self.end = 0
168
+
169
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
170
+ if start is None:
171
+ start = 0
172
+ if end is None or end > self.size:
173
+ end = self.size
174
+ if start >= self.size or start >= end:
175
+ return b""
176
+ l = end - start
177
+ if start >= self.start and end <= self.end:
178
+ # cache hit
179
+ return self.cache[start - self.start : end - self.start]
180
+ elif self.start <= start < self.end:
181
+ # partial hit
182
+ part = self.cache[start - self.start :]
183
+ l -= len(part)
184
+ start = self.end
185
+ else:
186
+ # miss
187
+ part = b""
188
+ end = min(self.size, end + self.blocksize)
189
+ self.cache = self.fetcher(start, end) # new block replaces old
190
+ self.start = start
191
+ self.end = self.start + len(self.cache)
192
+ return part + self.cache[:l]
193
+
194
+
195
+ class FirstChunkCache(BaseCache):
196
+ """Caches the first block of a file only
197
+
198
+ This may be useful for file types where the metadata is stored in the header,
199
+ but is randomly accessed.
200
+ """
201
+
202
+ name = "first"
203
+
204
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
205
+ super().__init__(blocksize, fetcher, size)
206
+ self.cache: bytes | None = None
207
+
208
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
209
+ start = start or 0
210
+ end = end or self.size
211
+ if start < self.blocksize:
212
+ if self.cache is None:
213
+ if end > self.blocksize:
214
+ data = self.fetcher(0, end)
215
+ self.cache = data[: self.blocksize]
216
+ return data[start:]
217
+ self.cache = self.fetcher(0, self.blocksize)
218
+ part = self.cache[start:end]
219
+ if end > self.blocksize:
220
+ part += self.fetcher(self.blocksize, end)
221
+ return part
222
+ else:
223
+ return self.fetcher(start, end)
224
+
225
+
226
+ class BlockCache(BaseCache):
227
+ """
228
+ Cache holding memory as a set of blocks.
229
+
230
+ Requests are only ever made ``blocksize`` at a time, and are
231
+ stored in an LRU cache. The least recently accessed block is
232
+ discarded when more than ``maxblocks`` are stored.
233
+
234
+ Parameters
235
+ ----------
236
+ blocksize : int
237
+ The number of bytes to store in each block.
238
+ Requests are only ever made for ``blocksize``, so this
239
+ should balance the overhead of making a request against
240
+ the granularity of the blocks.
241
+ fetcher : Callable
242
+ size : int
243
+ The total size of the file being cached.
244
+ maxblocks : int
245
+ The maximum number of blocks to cache for. The maximum memory
246
+ use for this cache is then ``blocksize * maxblocks``.
247
+ """
248
+
249
+ name = "blockcache"
250
+
251
+ def __init__(
252
+ self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
253
+ ) -> None:
254
+ super().__init__(blocksize, fetcher, size)
255
+ self.nblocks = math.ceil(size / blocksize)
256
+ self.maxblocks = maxblocks
257
+ self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)
258
+
259
+ def __repr__(self) -> str:
260
+ return (
261
+ f"<BlockCache blocksize={self.blocksize}, "
262
+ f"size={self.size}, nblocks={self.nblocks}>"
263
+ )
264
+
265
+ def cache_info(self):
266
+ """
267
+ The statistics on the block cache.
268
+
269
+ Returns
270
+ -------
271
+ NamedTuple
272
+ Returned directly from the LRU Cache used internally.
273
+ """
274
+ return self._fetch_block_cached.cache_info()
275
+
276
+ def __getstate__(self) -> dict[str, Any]:
277
+ state = self.__dict__
278
+ del state["_fetch_block_cached"]
279
+ return state
280
+
281
+ def __setstate__(self, state: dict[str, Any]) -> None:
282
+ self.__dict__.update(state)
283
+ self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
284
+ self._fetch_block
285
+ )
286
+
287
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
288
+ if start is None:
289
+ start = 0
290
+ if end is None:
291
+ end = self.size
292
+ if start >= self.size or start >= end:
293
+ return b""
294
+
295
+ # byte position -> block numbers
296
+ start_block_number = start // self.blocksize
297
+ end_block_number = end // self.blocksize
298
+
299
+ # these are cached, so safe to do multiple calls for the same start and end.
300
+ for block_number in range(start_block_number, end_block_number + 1):
301
+ self._fetch_block_cached(block_number)
302
+
303
+ return self._read_cache(
304
+ start,
305
+ end,
306
+ start_block_number=start_block_number,
307
+ end_block_number=end_block_number,
308
+ )
309
+
310
+ def _fetch_block(self, block_number: int) -> bytes:
311
+ """
312
+ Fetch the block of data for `block_number`.
313
+ """
314
+ if block_number > self.nblocks:
315
+ raise ValueError(
316
+ f"'block_number={block_number}' is greater than "
317
+ f"the number of blocks ({self.nblocks})"
318
+ )
319
+
320
+ start = block_number * self.blocksize
321
+ end = start + self.blocksize
322
+ logger.info("BlockCache fetching block %d", block_number)
323
+ block_contents = super()._fetch(start, end)
324
+ return block_contents
325
+
326
+ def _read_cache(
327
+ self, start: int, end: int, start_block_number: int, end_block_number: int
328
+ ) -> bytes:
329
+ """
330
+ Read from our block cache.
331
+
332
+ Parameters
333
+ ----------
334
+ start, end : int
335
+ The start and end byte positions.
336
+ start_block_number, end_block_number : int
337
+ The start and end block numbers.
338
+ """
339
+ start_pos = start % self.blocksize
340
+ end_pos = end % self.blocksize
341
+
342
+ if start_block_number == end_block_number:
343
+ block: bytes = self._fetch_block_cached(start_block_number)
344
+ return block[start_pos:end_pos]
345
+
346
+ else:
347
+ # read from the initial
348
+ out = [self._fetch_block_cached(start_block_number)[start_pos:]]
349
+
350
+ # intermediate blocks
351
+ # Note: it'd be nice to combine these into one big request. However
352
+ # that doesn't play nicely with our LRU cache.
353
+ out.extend(
354
+ map(
355
+ self._fetch_block_cached,
356
+ range(start_block_number + 1, end_block_number),
357
+ )
358
+ )
359
+
360
+ # final block
361
+ out.append(self._fetch_block_cached(end_block_number)[:end_pos])
362
+
363
+ return b"".join(out)
364
+
365
+
366
+ class BytesCache(BaseCache):
367
+ """Cache which holds data in a in-memory bytes object
368
+
369
+ Implements read-ahead by the block size, for semi-random reads progressing
370
+ through the file.
371
+
372
+ Parameters
373
+ ----------
374
+ trim: bool
375
+ As we read more data, whether to discard the start of the buffer when
376
+ we are more than a blocksize ahead of it.
377
+ """
378
+
379
+ name: ClassVar[str] = "bytes"
380
+
381
+ def __init__(
382
+ self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
383
+ ) -> None:
384
+ super().__init__(blocksize, fetcher, size)
385
+ self.cache = b""
386
+ self.start: int | None = None
387
+ self.end: int | None = None
388
+ self.trim = trim
389
+
390
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
391
+ # TODO: only set start/end after fetch, in case it fails?
392
+ # is this where retry logic might go?
393
+ if start is None:
394
+ start = 0
395
+ if end is None:
396
+ end = self.size
397
+ if start >= self.size or start >= end:
398
+ return b""
399
+ if (
400
+ self.start is not None
401
+ and start >= self.start
402
+ and self.end is not None
403
+ and end < self.end
404
+ ):
405
+ # cache hit: we have all the required data
406
+ offset = start - self.start
407
+ return self.cache[offset : offset + end - start]
408
+
409
+ if self.blocksize:
410
+ bend = min(self.size, end + self.blocksize)
411
+ else:
412
+ bend = end
413
+
414
+ if bend == start or start > self.size:
415
+ return b""
416
+
417
+ if (self.start is None or start < self.start) and (
418
+ self.end is None or end > self.end
419
+ ):
420
+ # First read, or extending both before and after
421
+ self.cache = self.fetcher(start, bend)
422
+ self.start = start
423
+ else:
424
+ assert self.start is not None
425
+ assert self.end is not None
426
+
427
+ if start < self.start:
428
+ if self.end is None or self.end - end > self.blocksize:
429
+ self.cache = self.fetcher(start, bend)
430
+ self.start = start
431
+ else:
432
+ new = self.fetcher(start, self.start)
433
+ self.start = start
434
+ self.cache = new + self.cache
435
+ elif self.end is not None and bend > self.end:
436
+ if self.end > self.size:
437
+ pass
438
+ elif end - self.end > self.blocksize:
439
+ self.cache = self.fetcher(start, bend)
440
+ self.start = start
441
+ else:
442
+ new = self.fetcher(self.end, bend)
443
+ self.cache = self.cache + new
444
+
445
+ self.end = self.start + len(self.cache)
446
+ offset = start - self.start
447
+ out = self.cache[offset : offset + end - start]
448
+ if self.trim:
449
+ num = (self.end - self.start) // (self.blocksize + 1)
450
+ if num > 1:
451
+ self.start += self.blocksize * num
452
+ self.cache = self.cache[self.blocksize * num :]
453
+ return out
454
+
455
+ def __len__(self) -> int:
456
+ return len(self.cache)
457
+
458
+
459
+ class AllBytes(BaseCache):
460
+ """Cache entire contents of the file"""
461
+
462
+ name: ClassVar[str] = "all"
463
+
464
+ def __init__(
465
+ self,
466
+ blocksize: int | None = None,
467
+ fetcher: Fetcher | None = None,
468
+ size: int | None = None,
469
+ data: bytes | None = None,
470
+ ) -> None:
471
+ super().__init__(blocksize, fetcher, size) # type: ignore[arg-type]
472
+ if data is None:
473
+ data = self.fetcher(0, self.size)
474
+ self.data = data
475
+
476
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
477
+ return self.data[start:stop]
478
+
479
+
480
+ class KnownPartsOfAFile(BaseCache):
481
+ """
482
+ Cache holding known file parts.
483
+
484
+ Parameters
485
+ ----------
486
+ blocksize: int
487
+ How far to read ahead in numbers of bytes
488
+ fetcher: func
489
+ Function of the form f(start, end) which gets bytes from remote as
490
+ specified
491
+ size: int
492
+ How big this file is
493
+ data: dict
494
+ A dictionary mapping explicit `(start, stop)` file-offset tuples
495
+ with known bytes.
496
+ strict: bool, default True
497
+ Whether to fetch reads that go beyond a known byte-range boundary.
498
+ If `False`, any read that ends outside a known part will be zero
499
+ padded. Note that zero padding will not be used for reads that
500
+ begin outside a known byte-range.
501
+ """
502
+
503
+ name: ClassVar[str] = "parts"
504
+
505
+ def __init__(
506
+ self,
507
+ blocksize: int,
508
+ fetcher: Fetcher,
509
+ size: int,
510
+ data: dict[tuple[int, int], bytes] = {},
511
+ strict: bool = True,
512
+ **_: Any,
513
+ ):
514
+ super().__init__(blocksize, fetcher, size)
515
+ self.strict = strict
516
+
517
+ # simple consolidation of contiguous blocks
518
+ if data:
519
+ old_offsets = sorted(data.keys())
520
+ offsets = [old_offsets[0]]
521
+ blocks = [data.pop(old_offsets[0])]
522
+ for start, stop in old_offsets[1:]:
523
+ start0, stop0 = offsets[-1]
524
+ if start == stop0:
525
+ offsets[-1] = (start0, stop)
526
+ blocks[-1] += data.pop((start, stop))
527
+ else:
528
+ offsets.append((start, stop))
529
+ blocks.append(data.pop((start, stop)))
530
+
531
+ self.data = dict(zip(offsets, blocks))
532
+ else:
533
+ self.data = data
534
+
535
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
536
+ if start is None:
537
+ start = 0
538
+ if stop is None:
539
+ stop = self.size
540
+
541
+ out = b""
542
+ for (loc0, loc1), data in self.data.items():
543
+ # If self.strict=False, use zero-padded data
544
+ # for reads beyond the end of a "known" buffer
545
+ if loc0 <= start < loc1:
546
+ off = start - loc0
547
+ out = data[off : off + stop - start]
548
+ if not self.strict or loc0 <= stop <= loc1:
549
+ # The request is within a known range, or
550
+ # it begins within a known range, and we
551
+ # are allowed to pad reads beyond the
552
+ # buffer with zero
553
+ out += b"\x00" * (stop - start - len(out))
554
+ return out
555
+ else:
556
+ # The request ends outside a known range,
557
+ # and we are being "strict" about reads
558
+ # beyond the buffer
559
+ start = loc1
560
+ break
561
+
562
+ # We only get here if there is a request outside the
563
+ # known parts of the file. In an ideal world, this
564
+ # should never happen
565
+ if self.fetcher is None:
566
+ # We cannot fetch the data, so raise an error
567
+ raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
568
+ # We can fetch the data, but should warn the user
569
+ # that this may be slow
570
+ warnings.warn(
571
+ f"Read is outside the known file parts: {(start, stop)}. "
572
+ f"IO/caching performance may be poor!"
573
+ )
574
+ logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
575
+ return out + super()._fetch(start, stop)
576
+
577
+
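The strict/zero-padding behaviour described in the KnownPartsOfAFile docstring above is easiest to see with a tiny pre-seeded cache. The following sketch is illustrative only (it is not part of the diff); the byte range, the sizes and the fetcher=None shortcut are assumptions chosen for the example.

    from fsspec.caching import KnownPartsOfAFile

    # One known 5-byte range at the start of a 10-byte file; no fetcher is
    # supplied, so a read outside the known part would raise ValueError.
    cache = KnownPartsOfAFile(
        blocksize=0, fetcher=None, size=10,
        data={(0, 5): b"hello"}, strict=False,
    )
    assert cache._fetch(0, 5) == b"hello"            # entirely inside the known range
    assert cache._fetch(2, 8) == b"llo\x00\x00\x00"  # zero-padded because strict=False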
578
+ class UpdatableLRU(Generic[P, T]):
579
+ """
580
+ Custom implementation of LRU cache that allows updating keys
581
+
582
+ Used by BackgroundBlockCache
583
+ """
584
+
585
+ class CacheInfo(NamedTuple):
586
+ hits: int
587
+ misses: int
588
+ maxsize: int
589
+ currsize: int
590
+
591
+ def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
592
+ self._cache: OrderedDict[Any, T] = collections.OrderedDict()
593
+ self._func = func
594
+ self._max_size = max_size
595
+ self._hits = 0
596
+ self._misses = 0
597
+ self._lock = threading.Lock()
598
+
599
+ def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
600
+ if kwargs:
601
+ raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
602
+ with self._lock:
603
+ if args in self._cache:
604
+ self._cache.move_to_end(args)
605
+ self._hits += 1
606
+ return self._cache[args]
607
+
608
+ result = self._func(*args, **kwargs)
609
+
610
+ with self._lock:
611
+ self._cache[args] = result
612
+ self._misses += 1
613
+ if len(self._cache) > self._max_size:
614
+ self._cache.popitem(last=False)
615
+
616
+ return result
617
+
618
+ def is_key_cached(self, *args: Any) -> bool:
619
+ with self._lock:
620
+ return args in self._cache
621
+
622
+ def add_key(self, result: T, *args: Any) -> None:
623
+ with self._lock:
624
+ self._cache[args] = result
625
+ if len(self._cache) > self._max_size:
626
+ self._cache.popitem(last=False)
627
+
628
+ def cache_info(self) -> UpdatableLRU.CacheInfo:
629
+ with self._lock:
630
+ return self.CacheInfo(
631
+ maxsize=self._max_size,
632
+ currsize=len(self._cache),
633
+ hits=self._hits,
634
+ misses=self._misses,
635
+ )
636
+
637
+
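As a rough illustration of the UpdatableLRU defined above (not part of the diff; the wrapped function and sizes are invented), positional calls are memoised, and results can also be injected with add_key, which is how the background fetcher below hands over completed blocks.

    from fsspec.caching import UpdatableLRU

    lru = UpdatableLRU(lambda block: block * 2, max_size=2)
    assert lru(3) == 6        # miss: calls the wrapped function
    assert lru(3) == 6        # hit: served from the cache
    lru.add_key(100, 7)       # inject a precomputed result for key (7,)
    assert lru.is_key_cached(7)
    print(lru.cache_info())   # CacheInfo(hits=1, misses=1, maxsize=2, currsize=2)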
638
+ class BackgroundBlockCache(BaseCache):
639
+ """
640
+ Cache holding memory as a set of blocks with pre-loading of
641
+ the next block in the background.
642
+
643
+ Requests are only ever made ``blocksize`` at a time, and are
644
+ stored in an LRU cache. The least recently accessed block is
645
+ discarded when more than ``maxblocks`` are stored. If the
646
+ next block is not in cache, it is loaded in a separate thread
647
+ in a non-blocking way.
648
+
649
+ Parameters
650
+ ----------
651
+ blocksize : int
652
+ The number of bytes to store in each block.
653
+ Requests are only ever made for ``blocksize``, so this
654
+ should balance the overhead of making a request against
655
+ the granularity of the blocks.
656
+ fetcher : Callable
657
+ size : int
658
+ The total size of the file being cached.
659
+ maxblocks : int
660
+ The maximum number of blocks to cache for. The maximum memory
661
+ use for this cache is then ``blocksize * maxblocks``.
662
+ """
663
+
664
+ name: ClassVar[str] = "background"
665
+
666
+ def __init__(
667
+ self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
668
+ ) -> None:
669
+ super().__init__(blocksize, fetcher, size)
670
+ self.nblocks = math.ceil(size / blocksize)
671
+ self.maxblocks = maxblocks
672
+ self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)
673
+
674
+ self._thread_executor = ThreadPoolExecutor(max_workers=1)
675
+ self._fetch_future_block_number: int | None = None
676
+ self._fetch_future: Future[bytes] | None = None
677
+ self._fetch_future_lock = threading.Lock()
678
+
679
+ def __repr__(self) -> str:
680
+ return (
681
+ f"<BackgroundBlockCache blocksize={self.blocksize}, "
682
+ f"size={self.size}, nblocks={self.nblocks}>"
683
+ )
684
+
685
+ def cache_info(self) -> UpdatableLRU.CacheInfo:
686
+ """
687
+ The statistics on the block cache.
688
+
689
+ Returns
690
+ -------
691
+ NamedTuple
692
+ Returned directly from the LRU Cache used internally.
693
+ """
694
+ return self._fetch_block_cached.cache_info()
695
+
696
+ def __getstate__(self) -> dict[str, Any]:
697
+ state = self.__dict__
698
+ del state["_fetch_block_cached"]
699
+ del state["_thread_executor"]
700
+ del state["_fetch_future_block_number"]
701
+ del state["_fetch_future"]
702
+ del state["_fetch_future_lock"]
703
+ return state
704
+
705
+ def __setstate__(self, state) -> None:
706
+ self.__dict__.update(state)
707
+ self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
708
+ self._thread_executor = ThreadPoolExecutor(max_workers=1)
709
+ self._fetch_future_block_number = None
710
+ self._fetch_future = None
711
+ self._fetch_future_lock = threading.Lock()
712
+
713
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
714
+ if start is None:
715
+ start = 0
716
+ if end is None:
717
+ end = self.size
718
+ if start >= self.size or start >= end:
719
+ return b""
720
+
721
+ # byte position -> block numbers
722
+ start_block_number = start // self.blocksize
723
+ end_block_number = end // self.blocksize
724
+
725
+ fetch_future_block_number = None
726
+ fetch_future = None
727
+ with self._fetch_future_lock:
728
+ # Background thread is running. Check whether we can or must join it.
729
+ if self._fetch_future is not None:
730
+ assert self._fetch_future_block_number is not None
731
+ if self._fetch_future.done():
732
+ logger.info("BlockCache joined background fetch without waiting.")
733
+ self._fetch_block_cached.add_key(
734
+ self._fetch_future.result(), self._fetch_future_block_number
735
+ )
736
+ # Cleanup the fetch variables. Done with fetching the block.
737
+ self._fetch_future_block_number = None
738
+ self._fetch_future = None
739
+ else:
740
+ # Must join if we need the block for the current fetch
741
+ must_join = bool(
742
+ start_block_number
743
+ <= self._fetch_future_block_number
744
+ <= end_block_number
745
+ )
746
+ if must_join:
747
+ # Copy to the local variables to release lock
748
+ # before waiting for result
749
+ fetch_future_block_number = self._fetch_future_block_number
750
+ fetch_future = self._fetch_future
751
+
752
+ # Cleanup the fetch variables. Have a local copy.
753
+ self._fetch_future_block_number = None
754
+ self._fetch_future = None
755
+
756
+ # Need to wait for the future for the current read
757
+ if fetch_future is not None:
758
+ logger.info("BlockCache waiting for background fetch.")
759
+ # Wait until result and put it in cache
760
+ self._fetch_block_cached.add_key(
761
+ fetch_future.result(), fetch_future_block_number
762
+ )
763
+
764
+ # these are cached, so safe to do multiple calls for the same start and end.
765
+ for block_number in range(start_block_number, end_block_number + 1):
766
+ self._fetch_block_cached(block_number)
767
+
768
+ # fetch next block in the background if nothing is running in the background,
769
+ # the block is within file and it is not already cached
770
+ end_block_plus_1 = end_block_number + 1
771
+ with self._fetch_future_lock:
772
+ if (
773
+ self._fetch_future is None
774
+ and end_block_plus_1 <= self.nblocks
775
+ and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
776
+ ):
777
+ self._fetch_future_block_number = end_block_plus_1
778
+ self._fetch_future = self._thread_executor.submit(
779
+ self._fetch_block, end_block_plus_1, "async"
780
+ )
781
+
782
+ return self._read_cache(
783
+ start,
784
+ end,
785
+ start_block_number=start_block_number,
786
+ end_block_number=end_block_number,
787
+ )
788
+
789
+ def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
790
+ """
791
+ Fetch the block of data for `block_number`.
792
+ """
793
+ if block_number > self.nblocks:
794
+ raise ValueError(
795
+ f"'block_number={block_number}' is greater than "
796
+ f"the number of blocks ({self.nblocks})"
797
+ )
798
+
799
+ start = block_number * self.blocksize
800
+ end = start + self.blocksize
801
+ logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
802
+ block_contents = super()._fetch(start, end)
803
+ return block_contents
804
+
805
+ def _read_cache(
806
+ self, start: int, end: int, start_block_number: int, end_block_number: int
807
+ ) -> bytes:
808
+ """
809
+ Read from our block cache.
810
+
811
+ Parameters
812
+ ----------
813
+ start, end : int
814
+ The start and end byte positions.
815
+ start_block_number, end_block_number : int
816
+ The start and end block numbers.
817
+ """
818
+ start_pos = start % self.blocksize
819
+ end_pos = end % self.blocksize
820
+
821
+ if start_block_number == end_block_number:
822
+ block = self._fetch_block_cached(start_block_number)
823
+ return block[start_pos:end_pos]
824
+
825
+ else:
826
+ # read from the initial
827
+ out = [self._fetch_block_cached(start_block_number)[start_pos:]]
828
+
829
+ # intermediate blocks
830
+ # Note: it'd be nice to combine these into one big request. However
831
+ # that doesn't play nicely with our LRU cache.
832
+ out.extend(
833
+ map(
834
+ self._fetch_block_cached,
835
+ range(start_block_number + 1, end_block_number),
836
+ )
837
+ )
838
+
839
+ # final block
840
+ out.append(self._fetch_block_cached(end_block_number)[:end_pos])
841
+
842
+ return b"".join(out)
843
+
844
+
845
+ caches: dict[str | None, type[BaseCache]] = {
846
+ # one custom case
847
+ None: BaseCache,
848
+ }
849
+
850
+
851
+ def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
852
+ """'Register' cache implementation.
853
+
854
+ Parameters
855
+ ----------
856
+ clobber: bool, optional
857
+ If set to True (default is False) - allow to overwrite existing
858
+ entry.
859
+
860
+ Raises
861
+ ------
862
+ ValueError
863
+ """
864
+ name = cls.name
865
+ if not clobber and name in caches:
866
+ raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}")
867
+ caches[name] = cls
868
+
869
+
870
+ for c in (
871
+ BaseCache,
872
+ MMapCache,
873
+ BytesCache,
874
+ ReadAheadCache,
875
+ BlockCache,
876
+ FirstChunkCache,
877
+ AllBytes,
878
+ KnownPartsOfAFile,
879
+ BackgroundBlockCache,
880
+ ):
881
+ register_cache(c)
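A hedged sketch of how a third-party cache could plug into the registry built above; the class and its name are invented for illustration.

    from fsspec.caching import BaseCache, caches, register_cache

    class PassthroughCache(BaseCache):
        """Example cache that always defers to the fetcher (illustrative only)."""
        name = "passthrough-example"

        def _fetch(self, start, stop):
            return self.fetcher(start, stop)

    register_cache(PassthroughCache)
    assert caches["passthrough-example"] is PassthroughCache
    # Registering the same name again raises ValueError unless clobber=True
    register_cache(PassthroughCache, clobber=True)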
venv/lib/python3.10/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
1
+ from functools import wraps
2
+
3
+
4
+ class Callback:
5
+ """
6
+ Base class and interface for callback mechanism
7
+
8
+ This class can be used directly for monitoring file transfers by
9
+ providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
10
+ below), or subclassed for more specialised behaviour.
11
+
12
+ Parameters
13
+ ----------
14
+ size: int (optional)
15
+ Nominal quantity for the value that corresponds to a complete
16
+ transfer, e.g., total number of tiles or total number of
17
+ bytes
18
+ value: int (0)
19
+ Starting internal counter value
20
+ hooks: dict or None
21
+ A dict of named functions to be called on each update. The signature
22
+ of these must be ``f(size, value, **kwargs)``
23
+ """
24
+
25
+ def __init__(self, size=None, value=0, hooks=None, **kwargs):
26
+ self.size = size
27
+ self.value = value
28
+ self.hooks = hooks or {}
29
+ self.kw = kwargs
30
+
31
+ def __enter__(self):
32
+ return self
33
+
34
+ def __exit__(self, *exc_args):
35
+ self.close()
36
+
37
+ def close(self):
38
+ """Close callback."""
39
+
40
+ def branched(self, path_1, path_2, **kwargs):
41
+ """
42
+ Return callback for child transfers
43
+
44
+ If this callback is operating at a higher level, e.g., put, which may
45
+ trigger transfers that can also be monitored. The function returns a callback
46
+ that has to be passed to the child method, e.g., put_file,
47
+ as the `callback=` argument.
48
+
49
+ The implementation uses `callback.branch` for compatibility.
50
+ When implementing callbacks, it is recommended to override this function instead
51
+ of `branch` and avoid calling `super().branched(...)`.
52
+
53
+ Prefer using this function over `branch`.
54
+
55
+ Parameters
56
+ ----------
57
+ path_1: str
58
+ Child's source path
59
+ path_2: str
60
+ Child's destination path
61
+ **kwargs:
62
+ Arbitrary keyword arguments
63
+
64
+ Returns
65
+ -------
66
+ callback: Callback
67
+ A callback instance to be passed to the child method
68
+ """
69
+ self.branch(path_1, path_2, kwargs)
70
+ # mutate kwargs so that we can force the caller to pass "callback=" explicitly
71
+ return kwargs.pop("callback", DEFAULT_CALLBACK)
72
+
73
+ def branch_coro(self, fn):
74
+ """
75
+ Wraps a coroutine, and pass a new child callback to it.
76
+ """
77
+
78
+ @wraps(fn)
79
+ async def func(path1, path2: str, **kwargs):
80
+ with self.branched(path1, path2, **kwargs) as child:
81
+ return await fn(path1, path2, callback=child, **kwargs)
82
+
83
+ return func
84
+
85
+ def set_size(self, size):
86
+ """
87
+ Set the internal maximum size attribute
88
+
89
+ Usually called if not initially set at instantiation. Note that this
90
+ triggers a ``call()``.
91
+
92
+ Parameters
93
+ ----------
94
+ size: int
95
+ """
96
+ self.size = size
97
+ self.call()
98
+
99
+ def absolute_update(self, value):
100
+ """
101
+ Set the internal value state
102
+
103
+ Triggers ``call()``
104
+
105
+ Parameters
106
+ ----------
107
+ value: int
108
+ """
109
+ self.value = value
110
+ self.call()
111
+
112
+ def relative_update(self, inc=1):
113
+ """
114
+ Delta increment the internal counter
115
+
116
+ Triggers ``call()``
117
+
118
+ Parameters
119
+ ----------
120
+ inc: int
121
+ """
122
+ self.value += inc
123
+ self.call()
124
+
125
+ def call(self, hook_name=None, **kwargs):
126
+ """
127
+ Execute hook(s) with current state
128
+
129
+ Each function is passed the internal size and current value
130
+
131
+ Parameters
132
+ ----------
133
+ hook_name: str or None
134
+ If given, execute on this hook
135
+ kwargs: passed on to (all) hook(s)
136
+ """
137
+ if not self.hooks:
138
+ return
139
+ kw = self.kw.copy()
140
+ kw.update(kwargs)
141
+ if hook_name:
142
+ if hook_name not in self.hooks:
143
+ return
144
+ return self.hooks[hook_name](self.size, self.value, **kw)
145
+ for hook in self.hooks.values() or []:
146
+ hook(self.size, self.value, **kw)
147
+
148
+ def wrap(self, iterable):
149
+ """
150
+ Wrap an iterable to call ``relative_update`` on each iterations
151
+
152
+ Parameters
153
+ ----------
154
+ iterable: Iterable
155
+ The iterable that is being wrapped
156
+ """
157
+ for item in iterable:
158
+ self.relative_update()
159
+ yield item
160
+
161
+ def branch(self, path_1, path_2, kwargs):
162
+ """
163
+ Set callbacks for child transfers
164
+
165
+ If this callback is operating at a higher level, e.g., put, which may
166
+ trigger transfers that can also be monitored. The passed kwargs are
167
+ to be *mutated* to add ``callback=``, if this class supports branching
168
+ to children.
169
+
170
+ Parameters
171
+ ----------
172
+ path_1: str
173
+ Child's source path
174
+ path_2: str
175
+ Child's destination path
176
+ kwargs: dict
177
+ arguments passed to child method, e.g., put_file.
178
+
179
+ Returns
180
+ -------
181
+
182
+ """
183
+ return None
184
+
185
+ def no_op(self, *_, **__):
186
+ pass
187
+
188
+ def __getattr__(self, item):
189
+ """
190
+ If undefined methods are called on this class, nothing happens
191
+ """
192
+ return self.no_op
193
+
194
+ @classmethod
195
+ def as_callback(cls, maybe_callback=None):
196
+ """Transform callback=... into Callback instance
197
+
198
+ For the special value of ``None``, return the global instance of
199
+ ``NoOpCallback``. This is an alternative to including
200
+ ``callback=DEFAULT_CALLBACK`` directly in a method signature.
201
+ """
202
+ if maybe_callback is None:
203
+ return DEFAULT_CALLBACK
204
+ return maybe_callback
205
+
206
+
207
+ class NoOpCallback(Callback):
208
+ """
209
+ This implementation of Callback does exactly nothing
210
+ """
211
+
212
+ def call(self, *args, **kwargs):
213
+ return None
214
+
215
+
216
+ class DotPrinterCallback(Callback):
217
+ """
218
+ Simple example Callback implementation
219
+
220
+ Almost identical to Callback with a hook that prints a char; here we
221
+ demonstrate how the outer layer may print "#" and the inner layer "."
222
+ """
223
+
224
+ def __init__(self, chr_to_print="#", **kwargs):
225
+ self.chr = chr_to_print
226
+ super().__init__(**kwargs)
227
+
228
+ def branch(self, path_1, path_2, kwargs):
229
+ """Mutate kwargs to add new instance with different print char"""
230
+ kwargs["callback"] = DotPrinterCallback(".")
231
+
232
+ def call(self, **kwargs):
233
+ """Just outputs a character"""
234
+ print(self.chr, end="")
235
+
236
+
237
+ class TqdmCallback(Callback):
238
+ """
239
+ A callback to display a progress bar using tqdm
240
+
241
+ Parameters
242
+ ----------
243
+ tqdm_kwargs : dict, (optional)
244
+ Any argument accepted by the tqdm constructor.
245
+ See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
246
+ Will be forwarded to `tqdm_cls`.
247
+ tqdm_cls: (optional)
248
+ subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.
249
+
250
+ Examples
251
+ --------
252
+ >>> import fsspec
253
+ >>> from fsspec.callbacks import TqdmCallback
254
+ >>> fs = fsspec.filesystem("memory")
255
+ >>> path2distant_data = "/your-path"
256
+ >>> fs.upload(
257
+ ".",
258
+ path2distant_data,
259
+ recursive=True,
260
+ callback=TqdmCallback(),
261
+ )
262
+
263
+ You can forward args to tqdm using the ``tqdm_kwargs`` parameter.
264
+
265
+ >>> fs.upload(
266
+ ".",
267
+ path2distant_data,
268
+ recursive=True,
269
+ callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
270
+ )
271
+
272
+ You can also customize the progress bar by passing a subclass of `tqdm`.
273
+
274
+ .. code-block:: python
275
+
276
+ class TqdmFormat(tqdm):
277
+ '''Provides a `total_time` format parameter'''
278
+ @property
279
+ def format_dict(self):
280
+ d = super().format_dict
281
+ total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
282
+ d.update(total_time=self.format_interval(total_time) + " in total")
283
+ return d
284
+
285
+ >>> with TqdmCallback(
286
+ tqdm_kwargs={
287
+ "desc": "desc",
288
+ "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
289
+ },
290
+ tqdm_cls=TqdmFormat,
291
+ ) as callback:
292
+ fs.upload(".", path2distant_data, recursive=True, callback=callback)
293
+ """
294
+
295
+ def __init__(self, tqdm_kwargs=None, *args, **kwargs):
296
+ try:
297
+ from tqdm import tqdm
298
+
299
+ except ImportError as exce:
300
+ raise ImportError(
301
+ "Using TqdmCallback requires tqdm to be installed"
302
+ ) from exce
303
+
304
+ self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
305
+ self._tqdm_kwargs = tqdm_kwargs or {}
306
+ self.tqdm = None
307
+ super().__init__(*args, **kwargs)
308
+
309
+ def call(self, *args, **kwargs):
310
+ if self.tqdm is None:
311
+ self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
312
+ self.tqdm.total = self.size
313
+ self.tqdm.update(self.value - self.tqdm.n)
314
+
315
+ def close(self):
316
+ if self.tqdm is not None:
317
+ self.tqdm.close()
318
+ self.tqdm = None
319
+
320
+ def __del__(self):
321
+ return self.close()
322
+
323
+
324
+ DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
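A minimal sketch of driving the hooks mechanism above by hand (illustrative; the hook name and sizes are invented). set_size and each wrapped iteration trigger call(), which fans out to every registered hook.

    from fsspec.callbacks import Callback

    seen = []

    def record(size, value, **kwargs):
        seen.append((size, value))

    cb = Callback(hooks={"record": record})
    cb.set_size(3)               # -> record(3, 0)
    for _ in cb.wrap(range(3)):  # each iteration -> relative_update() -> record(...)
        pass
    assert seen[-1] == (3, 3)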
venv/lib/python3.10/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,174 @@
1
+ """Helper functions for a standard streaming compression API"""
2
+ from zipfile import ZipFile
3
+
4
+ import fsspec.utils
5
+ from fsspec.spec import AbstractBufferedFile
6
+
7
+
8
+ def noop_file(file, mode, **kwargs):
9
+ return file
10
+
11
+
12
+ # TODO: files should also be available as contexts
13
+ # should be functions of the form func(infile, mode=, **kwargs) -> file-like
14
+ compr = {None: noop_file}
15
+
16
+
17
+ def register_compression(name, callback, extensions, force=False):
18
+ """Register an "inferable" file compression type.
19
+
20
+ Registers transparent file compression type for use with fsspec.open.
21
+ Compression can be specified by name in open, or "infer"-ed for any files
22
+ ending with the given extensions.
23
+
24
+ Args:
25
+ name: (str) The compression type name. Eg. "gzip".
26
+ callback: A callable of form (infile, mode, **kwargs) -> file-like.
27
+ Accepts an input file-like object, the target mode and kwargs.
28
+ Returns a wrapped file-like object.
29
+ extensions: (str, Iterable[str]) A file extension, or list of file
30
+ extensions for which to infer this compression scheme. Eg. "gz".
31
+ force: (bool) Force re-registration of compression type or extensions.
32
+
33
+ Raises:
34
+ ValueError: If name or extensions already registered, and not force.
35
+
36
+ """
37
+ if isinstance(extensions, str):
38
+ extensions = [extensions]
39
+
40
+ # Validate registration
41
+ if name in compr and not force:
42
+ raise ValueError(f"Duplicate compression registration: {name}")
43
+
44
+ for ext in extensions:
45
+ if ext in fsspec.utils.compressions and not force:
46
+ raise ValueError(f"Duplicate compression file extension: {ext} ({name})")
47
+
48
+ compr[name] = callback
49
+
50
+ for ext in extensions:
51
+ fsspec.utils.compressions[ext] = name
52
+
53
+
54
+ def unzip(infile, mode="rb", filename=None, **kwargs):
55
+ if "r" not in mode:
56
+ filename = filename or "file"
57
+ z = ZipFile(infile, mode="w", **kwargs)
58
+ fo = z.open(filename, mode="w")
59
+ fo.close = lambda closer=fo.close: closer() or z.close()
60
+ return fo
61
+ z = ZipFile(infile)
62
+ if filename is None:
63
+ filename = z.namelist()[0]
64
+ return z.open(filename, mode="r", **kwargs)
65
+
66
+
67
+ register_compression("zip", unzip, "zip")
68
+
69
+ try:
70
+ from bz2 import BZ2File
71
+ except ImportError:
72
+ pass
73
+ else:
74
+ register_compression("bz2", BZ2File, "bz2")
75
+
76
+ try: # pragma: no cover
77
+ from isal import igzip
78
+
79
+ def isal(infile, mode="rb", **kwargs):
80
+ return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
81
+
82
+ register_compression("gzip", isal, "gz")
83
+ except ImportError:
84
+ from gzip import GzipFile
85
+
86
+ register_compression(
87
+ "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
88
+ )
89
+
90
+ try:
91
+ from lzma import LZMAFile
92
+
93
+ register_compression("lzma", LZMAFile, "lzma")
94
+ register_compression("xz", LZMAFile, "xz")
95
+ except ImportError:
96
+ pass
97
+
98
+ try:
99
+ import lzmaffi
100
+
101
+ register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
102
+ register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
103
+ except ImportError:
104
+ pass
105
+
106
+
107
+ class SnappyFile(AbstractBufferedFile):
108
+ def __init__(self, infile, mode, **kwargs):
109
+ import snappy
110
+
111
+ super().__init__(
112
+ fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
113
+ )
114
+ self.infile = infile
115
+ if "r" in mode:
116
+ self.codec = snappy.StreamDecompressor()
117
+ else:
118
+ self.codec = snappy.StreamCompressor()
119
+
120
+ def _upload_chunk(self, final=False):
121
+ self.buffer.seek(0)
122
+ out = self.codec.add_chunk(self.buffer.read())
123
+ self.infile.write(out)
124
+ return True
125
+
126
+ def seek(self, loc, whence=0):
127
+ raise NotImplementedError("SnappyFile is not seekable")
128
+
129
+ def seekable(self):
130
+ return False
131
+
132
+ def _fetch_range(self, start, end):
133
+ """Get the specified set of bytes from remote"""
134
+ data = self.infile.read(end - start)
135
+ return self.codec.decompress(data)
136
+
137
+
138
+ try:
139
+ import snappy
140
+
141
+ snappy.compress
142
+ # Snappy may use the .sz file extension, but this is not part of the
143
+ # standard implementation.
144
+ register_compression("snappy", SnappyFile, [])
145
+
146
+ except (ImportError, NameError, AttributeError):
147
+ pass
148
+
149
+ try:
150
+ import lz4.frame
151
+
152
+ register_compression("lz4", lz4.frame.open, "lz4")
153
+ except ImportError:
154
+ pass
155
+
156
+ try:
157
+ import zstandard as zstd
158
+
159
+ def zstandard_file(infile, mode="rb"):
160
+ if "r" in mode:
161
+ cctx = zstd.ZstdDecompressor()
162
+ return cctx.stream_reader(infile)
163
+ else:
164
+ cctx = zstd.ZstdCompressor(level=10)
165
+ return cctx.stream_writer(infile)
166
+
167
+ register_compression("zstd", zstandard_file, "zst")
168
+ except ImportError:
169
+ pass
170
+
171
+
172
+ def available_compressions():
173
+ """Return a list of the implemented compressions."""
174
+ return list(compr)
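A hedged example of registering an extra codec with the machinery above; the pass-through callback, its name and the ".raw" extension are all invented for illustration, and the final open() assumes the in-memory filesystem is available.

    import fsspec
    from fsspec.compression import available_compressions, register_compression

    def identity(infile, mode="rb", **kwargs):
        # no transformation; mirrors noop_file above
        return infile

    register_compression("identity-example", identity, extensions="raw")
    assert "identity-example" in available_compressions()

    # compression="infer" now resolves the ".raw" suffix to the new codec
    with fsspec.open("memory://data.raw", "wb", compression="infer") as f:
        f.write(b"payload")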
venv/lib/python3.10/site-packages/fsspec/config.py ADDED
@@ -0,0 +1,131 @@
1
+ from __future__ import annotations
2
+
3
+ import configparser
4
+ import json
5
+ import os
6
+ import warnings
7
+ from typing import Any
8
+
9
+ conf: dict[str, dict[str, Any]] = {}
10
+ default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
11
+ conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
12
+
13
+
14
+ def set_conf_env(conf_dict, envdict=os.environ):
15
+ """Set config values from environment variables
16
+
17
+ Looks for variables of the form ``FSSPEC_<protocol>`` and
18
+ ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
19
+ as a json dictionary and used to ``update`` the config of the
20
+ corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
21
+ attempt to convert the string value, but the kwarg keys will be lower-cased.
22
+
23
+ The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
24
+ ``FSSPEC_<protocol>`` ones.
25
+
26
+ Parameters
27
+ ----------
28
+ conf_dict : dict(str, dict)
29
+ This dict will be mutated
30
+ envdict : dict-like(str, str)
31
+ Source for the values - usually the real environment
32
+ """
33
+ kwarg_keys = []
34
+ for key in envdict:
35
+ if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
36
+ if key.count("_") > 1:
37
+ kwarg_keys.append(key)
38
+ continue
39
+ try:
40
+ value = json.loads(envdict[key])
41
+ except json.decoder.JSONDecodeError as ex:
42
+ warnings.warn(
43
+ f"Ignoring environment variable {key} due to a parse failure: {ex}"
44
+ )
45
+ else:
46
+ if isinstance(value, dict):
47
+ _, proto = key.split("_", 1)
48
+ conf_dict.setdefault(proto.lower(), {}).update(value)
49
+ else:
50
+ warnings.warn(
51
+ f"Ignoring environment variable {key} due to not being a dict:"
52
+ f" {type(value)}"
53
+ )
54
+ elif key.startswith("FSSPEC"):
55
+ warnings.warn(
56
+ f"Ignoring environment variable {key} due to having an unexpected name"
57
+ )
58
+
59
+ for key in kwarg_keys:
60
+ _, proto, kwarg = key.split("_", 2)
61
+ conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
62
+
63
+
64
+ def set_conf_files(cdir, conf_dict):
65
+ """Set config values from files
66
+
67
+ Scans for INI and JSON files in the given dictionary, and uses their
68
+ contents to set the config. In case of repeated values, later values
69
+ win.
70
+
71
+ In the case of INI files, all values are strings, and these will not
72
+ be converted.
73
+
74
+ Parameters
75
+ ----------
76
+ cdir : str
77
+ Directory to search
78
+ conf_dict : dict(str, dict)
79
+ This dict will be mutated
80
+ """
81
+ if not os.path.isdir(cdir):
82
+ return
83
+ allfiles = sorted(os.listdir(cdir))
84
+ for fn in allfiles:
85
+ if fn.endswith(".ini"):
86
+ ini = configparser.ConfigParser()
87
+ ini.read(os.path.join(cdir, fn))
88
+ for key in ini:
89
+ if key == "DEFAULT":
90
+ continue
91
+ conf_dict.setdefault(key, {}).update(dict(ini[key]))
92
+ if fn.endswith(".json"):
93
+ with open(os.path.join(cdir, fn)) as f:
94
+ js = json.load(f)
95
+ for key in js:
96
+ conf_dict.setdefault(key, {}).update(dict(js[key]))
97
+
98
+
99
+ def apply_config(cls, kwargs, conf_dict=None):
100
+ """Supply default values for kwargs when instantiating class
101
+
102
+ Augments the passed kwargs, by finding entries in the config dict
103
+ which match the class's ``.protocol`` attribute (one or more str)
104
+
105
+ Parameters
106
+ ----------
107
+ cls : file system implementation
108
+ kwargs : dict
109
+ conf_dict : dict of dict
110
+ Typically this is the global configuration
111
+
112
+ Returns
113
+ -------
114
+ dict : the modified set of kwargs
115
+ """
116
+ if conf_dict is None:
117
+ conf_dict = conf
118
+ protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
119
+ kw = {}
120
+ for proto in protos:
121
+ # default kwargs from the current state of the config
122
+ if proto in conf_dict:
123
+ kw.update(conf_dict[proto])
124
+ # explicit kwargs always win
125
+ kw.update(**kwargs)
126
+ kwargs = kw
127
+ return kwargs
128
+
129
+
130
+ set_conf_files(conf_dir, conf)
131
+ set_conf_env(conf)
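To make the precedence above concrete, here is an illustrative call to set_conf_env with a fake environment (the protocol name and values are invented): the JSON form is merged as a dict, while the per-kwarg form stays a string, is lower-cased, and is applied afterwards.

    from fsspec.config import set_conf_env

    cfg = {}
    fake_env = {
        "FSSPEC_MYPROTO": '{"username": "alice"}',   # parsed as JSON and merged
        "FSSPEC_MYPROTO_TIMEOUT": "30",              # kept as a string
    }
    set_conf_env(cfg, envdict=fake_env)
    assert cfg == {"myproto": {"username": "alice", "timeout": "30"}}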
venv/lib/python3.10/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import sys
5
+ import time
6
+
7
+ import pytest
8
+
9
+ import fsspec
10
+ from fsspec.implementations.cached import CachingFileSystem
11
+
12
+
13
+ @pytest.fixture()
14
+ def m():
15
+ """
16
+ Fixture providing a memory filesystem.
17
+ """
18
+ m = fsspec.filesystem("memory")
19
+ m.store.clear()
20
+ m.pseudo_dirs.clear()
21
+ m.pseudo_dirs.append("")
22
+ try:
23
+ yield m
24
+ finally:
25
+ m.store.clear()
26
+ m.pseudo_dirs.clear()
27
+ m.pseudo_dirs.append("")
28
+
29
+
30
+ @pytest.fixture
31
+ def ftp_writable(tmpdir):
32
+ """
33
+ Fixture providing a writable FTP filesystem.
34
+ """
35
+ pytest.importorskip("pyftpdlib")
36
+ from fsspec.implementations.ftp import FTPFileSystem
37
+
38
+ FTPFileSystem.clear_instance_cache() # remove lingering connections
39
+ CachingFileSystem.clear_instance_cache()
40
+ d = str(tmpdir)
41
+ with open(os.path.join(d, "out"), "wb") as f:
42
+ f.write(b"hello" * 10000)
43
+ P = subprocess.Popen(
44
+ [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
45
+ )
46
+ try:
47
+ time.sleep(1)
48
+ yield "localhost", 2121, "user", "pass"
49
+ finally:
50
+ P.terminate()
51
+ P.wait()
52
+ try:
53
+ shutil.rmtree(tmpdir)
54
+ except Exception:
55
+ pass
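For reference, a test consuming the ``m`` fixture above might look like the following sketch (hypothetical test, not part of the diff); pytest injects a clean in-memory filesystem and the fixture wipes it afterwards.

    def test_memory_roundtrip(m):
        m.pipe("/folder/data.bin", b"abc")          # write bytes into memory://
        assert m.cat("/folder/data.bin") == b"abc"  # and read them back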
venv/lib/python3.10/site-packages/fsspec/core.py ADDED
@@ -0,0 +1,714 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import logging
5
+ import os
6
+ import re
7
+ from glob import has_magic
8
+ from pathlib import Path
9
+
10
+ # for backwards compat, we export cache things from here too
11
+ from .caching import ( # noqa: F401
12
+ BaseCache,
13
+ BlockCache,
14
+ BytesCache,
15
+ MMapCache,
16
+ ReadAheadCache,
17
+ caches,
18
+ )
19
+ from .compression import compr
20
+ from .registry import filesystem, get_filesystem_class
21
+ from .utils import (
22
+ _unstrip_protocol,
23
+ build_name_function,
24
+ infer_compression,
25
+ stringify_path,
26
+ )
27
+
28
+ logger = logging.getLogger("fsspec")
29
+
30
+
31
+ class OpenFile:
32
+ """
33
+ File-like object to be used in a context
34
+
35
+ Can layer (buffered) text-mode and compression over any file-system, which
36
+ are typically binary-only.
37
+
38
+ These instances are safe to serialize, as the low-level file object
39
+ is not created until invoked using ``with``.
40
+
41
+ Parameters
42
+ ----------
43
+ fs: FileSystem
44
+ The file system to use for opening the file. Should be a subclass or duck-type
45
+ with ``fsspec.spec.AbstractFileSystem``
46
+ path: str
47
+ Location to open
48
+ mode: str like 'rb', optional
49
+ Mode of the opened file
50
+ compression: str or None, optional
51
+ Compression to apply
52
+ encoding: str or None, optional
53
+ The encoding to use if opened in text mode.
54
+ errors: str or None, optional
55
+ How to handle encoding errors if opened in text mode.
56
+ newline: None or str
57
+ Passed to TextIOWrapper in text mode, how to handle line endings.
58
+ autoopen: bool
59
+ If True, calls open() immediately. Mostly used by pickle
60
+ pos: int
61
+ If given and autoopen is True, seek to this location immediately
62
+ """
63
+
64
+ def __init__(
65
+ self,
66
+ fs,
67
+ path,
68
+ mode="rb",
69
+ compression=None,
70
+ encoding=None,
71
+ errors=None,
72
+ newline=None,
73
+ ):
74
+ self.fs = fs
75
+ self.path = path
76
+ self.mode = mode
77
+ self.compression = get_compression(path, compression)
78
+ self.encoding = encoding
79
+ self.errors = errors
80
+ self.newline = newline
81
+ self.fobjects = []
82
+
83
+ def __reduce__(self):
84
+ return (
85
+ OpenFile,
86
+ (
87
+ self.fs,
88
+ self.path,
89
+ self.mode,
90
+ self.compression,
91
+ self.encoding,
92
+ self.errors,
93
+ self.newline,
94
+ ),
95
+ )
96
+
97
+ def __repr__(self):
98
+ return f"<OpenFile '{self.path}'>"
99
+
100
+ def __enter__(self):
101
+ mode = self.mode.replace("t", "").replace("b", "") + "b"
102
+
103
+ f = self.fs.open(self.path, mode=mode)
104
+
105
+ self.fobjects = [f]
106
+
107
+ if self.compression is not None:
108
+ compress = compr[self.compression]
109
+ f = compress(f, mode=mode[0])
110
+ self.fobjects.append(f)
111
+
112
+ if "b" not in self.mode:
113
+ # assume, for example, that 'r' is equivalent to 'rt' as in builtin
114
+ f = PickleableTextIOWrapper(
115
+ f, encoding=self.encoding, errors=self.errors, newline=self.newline
116
+ )
117
+ self.fobjects.append(f)
118
+
119
+ return self.fobjects[-1]
120
+
121
+ def __exit__(self, *args):
122
+ self.close()
123
+
124
+ @property
125
+ def full_name(self):
126
+ return _unstrip_protocol(self.path, self.fs)
127
+
128
+ def open(self):
129
+ """Materialise this as a real open file without context
130
+
131
+ The OpenFile object should be explicitly closed to avoid enclosed file
132
+ instances persisting. You must, therefore, keep a reference to the OpenFile
133
+ during the life of the file-like it generates.
134
+ """
135
+ return self.__enter__()
136
+
137
+ def close(self):
138
+ """Close all encapsulated file objects"""
139
+ for f in reversed(self.fobjects):
140
+ if "r" not in self.mode and not f.closed:
141
+ f.flush()
142
+ f.close()
143
+ self.fobjects.clear()
144
+
145
+
146
+ class OpenFiles(list):
147
+ """List of OpenFile instances
148
+
149
+ Can be used in a single context, which opens and closes all of the
150
+ contained files. Normal list access to get the elements works as
151
+ normal.
152
+
153
+ A special case is made for caching filesystems - the files will
154
+ be down/uploaded together at the start or end of the context, and
155
+ this may happen concurrently, if the target filesystem supports it.
156
+ """
157
+
158
+ def __init__(self, *args, mode="rb", fs=None):
159
+ self.mode = mode
160
+ self.fs = fs
161
+ self.files = []
162
+ super().__init__(*args)
163
+
164
+ def __enter__(self):
165
+ if self.fs is None:
166
+ raise ValueError("Context has already been used")
167
+
168
+ fs = self.fs
169
+ while True:
170
+ if hasattr(fs, "open_many"):
171
+ # check for concurrent cache download; or set up for upload
172
+ self.files = fs.open_many(self)
173
+ return self.files
174
+ if hasattr(fs, "fs") and fs.fs is not None:
175
+ fs = fs.fs
176
+ else:
177
+ break
178
+ return [s.__enter__() for s in self]
179
+
180
+ def __exit__(self, *args):
181
+ fs = self.fs
182
+ [s.__exit__(*args) for s in self]
183
+ if "r" not in self.mode:
184
+ while True:
185
+ if hasattr(fs, "open_many"):
186
+ # check for concurrent cache upload
187
+ fs.commit_many(self.files)
188
+ return
189
+ if hasattr(fs, "fs") and fs.fs is not None:
190
+ fs = fs.fs
191
+ else:
192
+ break
193
+
194
+ def __getitem__(self, item):
195
+ out = super().__getitem__(item)
196
+ if isinstance(item, slice):
197
+ return OpenFiles(out, mode=self.mode, fs=self.fs)
198
+ return out
199
+
200
+ def __repr__(self):
201
+ return f"<List of {len(self)} OpenFile instances>"
202
+
203
+
204
+ def open_files(
205
+ urlpath,
206
+ mode="rb",
207
+ compression=None,
208
+ encoding="utf8",
209
+ errors=None,
210
+ name_function=None,
211
+ num=1,
212
+ protocol=None,
213
+ newline=None,
214
+ auto_mkdir=True,
215
+ expand=True,
216
+ **kwargs,
217
+ ):
218
+ """Given a path or paths, return a list of ``OpenFile`` objects.
219
+
220
+ For writing, a str path must contain the "*" character, which will be filled
221
+ in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
222
+
223
+ For either reading or writing, can instead provide explicit list of paths.
224
+
225
+ Parameters
226
+ ----------
227
+ urlpath: string or list
228
+ Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
229
+ to read from alternative filesystems. To read from multiple files you
230
+ can pass a globstring or a list of paths, with the caveat that they
231
+ must all have the same protocol.
232
+ mode: 'rb', 'wt', etc.
233
+ compression: string or None
234
+ If given, open file using compression codec. Can either be a compression
235
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
236
+ compression from the filename suffix.
237
+ encoding: str
238
+ For text mode only
239
+ errors: None or str
240
+ Passed to TextIOWrapper in text mode
241
+ name_function: function or None
242
+ if opening a set of files for writing, those files do not yet exist,
243
+ so we need to generate their names by formatting the urlpath for
244
+ each sequence number
245
+ num: int [1]
246
+ if writing mode, number of files we expect to create (passed to
247
+ name_function)
248
+ protocol: str or None
249
+ If given, overrides the protocol found in the URL.
250
+ newline: bytes or None
251
+ Used for line terminator in text mode. If None, uses system default;
252
+ if blank, uses no translation.
253
+ auto_mkdir: bool (True)
254
+ If in write mode, this will ensure the target directory exists before
255
+ writing, by calling ``fs.mkdirs(exist_ok=True)``.
256
+ expand: bool
257
+ **kwargs: dict
258
+ Extra options that make sense to a particular storage connection, e.g.
259
+ host, port, username, password, etc.
260
+
261
+ Examples
262
+ --------
263
+ >>> files = open_files('2015-*-*.csv') # doctest: +SKIP
264
+ >>> files = open_files(
265
+ ... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
266
+ ... ) # doctest: +SKIP
267
+
268
+ Returns
269
+ -------
270
+ An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
271
+ be used as a single context
272
+
273
+ Notes
274
+ -----
275
+ For a full list of the available protocols and the implementations that
276
+ they map across to, see the latest online documentation:
277
+
278
+ - For implementations built into ``fsspec`` see
279
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
280
+ - For implementations in separate packages see
281
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
282
+ """
283
+ fs, fs_token, paths = get_fs_token_paths(
284
+ urlpath,
285
+ mode,
286
+ num=num,
287
+ name_function=name_function,
288
+ storage_options=kwargs,
289
+ protocol=protocol,
290
+ expand=expand,
291
+ )
292
+ if fs.protocol == "file":
293
+ fs.auto_mkdir = auto_mkdir
294
+ elif "r" not in mode and auto_mkdir:
295
+ parents = {fs._parent(path) for path in paths}
296
+ for parent in parents:
297
+ try:
298
+ fs.makedirs(parent, exist_ok=True)
299
+ except PermissionError:
300
+ pass
301
+ return OpenFiles(
302
+ [
303
+ OpenFile(
304
+ fs,
305
+ path,
306
+ mode=mode,
307
+ compression=compression,
308
+ encoding=encoding,
309
+ errors=errors,
310
+ newline=newline,
311
+ )
312
+ for path in paths
313
+ ],
314
+ mode=mode,
315
+ fs=fs,
316
+ )
317
+
318
+
319
+ def _un_chain(path, kwargs):
320
+ x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
321
+ bits = (
322
+ [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
323
+ if "::" in path
324
+ else [path]
325
+ )
326
+ # [[url, protocol, kwargs], ...]
327
+ out = []
328
+ previous_bit = None
329
+ kwargs = kwargs.copy()
330
+ for bit in reversed(bits):
331
+ protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
332
+ cls = get_filesystem_class(protocol)
333
+ extra_kwargs = cls._get_kwargs_from_urls(bit)
334
+ kws = kwargs.pop(protocol, {})
335
+ if bit is bits[0]:
336
+ kws.update(kwargs)
337
+ kw = dict(**extra_kwargs, **kws)
338
+ bit = cls._strip_protocol(bit)
339
+ if (
340
+ protocol in {"blockcache", "filecache", "simplecache"}
341
+ and "target_protocol" not in kw
342
+ ):
343
+ bit = previous_bit
344
+ out.append((bit, protocol, kw))
345
+ previous_bit = bit
346
+ out.reverse()
347
+ return out
348
+
349
+
350
+ def url_to_fs(url, **kwargs):
351
+ """
352
+ Turn fully-qualified and potentially chained URL into filesystem instance
353
+
354
+ Parameters
355
+ ----------
356
+ url : str
357
+ The fsspec-compatible URL
358
+ **kwargs: dict
359
+ Extra options that make sense to a particular storage connection, e.g.
360
+ host, port, username, password, etc.
361
+
362
+ Returns
363
+ -------
364
+ filesystem : FileSystem
365
+ The new filesystem discovered from ``url`` and created with
366
+ ``**kwargs``.
367
+ urlpath : str
368
+ The filesystem-specific URL for ``url``.
369
+ """
370
+ # non-FS arguments that appear in fsspec.open()
371
+ # inspect could keep this in sync with open()'s signature
372
+ known_kwargs = {
373
+ "compression",
374
+ "encoding",
375
+ "errors",
376
+ "expand",
377
+ "mode",
378
+ "name_function",
379
+ "newline",
380
+ "num",
381
+ }
382
+ kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
383
+ chain = _un_chain(url, kwargs)
384
+ inkwargs = {}
385
+ # Reverse iterate the chain, creating a nested target_* structure
386
+ for i, ch in enumerate(reversed(chain)):
387
+ urls, protocol, kw = ch
388
+ if i == len(chain) - 1:
389
+ inkwargs = dict(**kw, **inkwargs)
390
+ continue
391
+ inkwargs["target_options"] = dict(**kw, **inkwargs)
392
+ inkwargs["target_protocol"] = protocol
393
+ inkwargs["fo"] = urls
394
+ urlpath, protocol, _ = chain[0]
395
+ fs = filesystem(protocol, **inkwargs)
396
+ return fs, urlpath
397
+
398
+
399
+ def open(
400
+ urlpath,
401
+ mode="rb",
402
+ compression=None,
403
+ encoding="utf8",
404
+ errors=None,
405
+ protocol=None,
406
+ newline=None,
407
+ **kwargs,
408
+ ):
409
+ """Given a path or paths, return one ``OpenFile`` object.
410
+
411
+ Parameters
412
+ ----------
413
+ urlpath: string or list
414
+ Absolute or relative filepath. Prefix with a protocol like ``s3://``
415
+ to read from alternative filesystems. Should not include glob
416
+ character(s).
417
+ mode: 'rb', 'wt', etc.
418
+ compression: string or None
419
+ If given, open file using compression codec. Can either be a compression
420
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
421
+ compression from the filename suffix.
422
+ encoding: str
423
+ For text mode only
424
+ errors: None or str
425
+ Passed to TextIOWrapper in text mode
426
+ protocol: str or None
427
+ If given, overrides the protocol found in the URL.
428
+ newline: bytes or None
429
+ Used for line terminator in text mode. If None, uses system default;
430
+ if blank, uses no translation.
431
+ **kwargs: dict
432
+ Extra options that make sense to a particular storage connection, e.g.
433
+ host, port, username, password, etc.
434
+
435
+ Examples
436
+ --------
437
+ >>> openfile = open('2015-01-01.csv') # doctest: +SKIP
438
+ >>> openfile = open(
439
+ ... 's3://bucket/2015-01-01.csv.gz', compression='gzip'
440
+ ... ) # doctest: +SKIP
441
+ >>> with openfile as f:
442
+ ... df = pd.read_csv(f) # doctest: +SKIP
443
+ ...
444
+
445
+ Returns
446
+ -------
447
+ ``OpenFile`` object.
448
+
449
+ Notes
450
+ -----
451
+ For a full list of the available protocols and the implementations that
452
+ they map across to, see the latest online documentation:
453
+
454
+ - For implementations built into ``fsspec`` see
455
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
456
+ - For implementations in separate packages see
457
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
458
+ """
459
+ kw = {"expand": False}
460
+ kw.update(kwargs)
461
+ out = open_files(
462
+ urlpath=[urlpath],
463
+ mode=mode,
464
+ compression=compression,
465
+ encoding=encoding,
466
+ errors=errors,
467
+ protocol=protocol,
468
+ newline=newline,
469
+ **kw,
470
+ )
471
+ if not out:
472
+ raise FileNotFoundError(urlpath)
473
+ return out[0]
474
+
475
+
476
+ def open_local(
477
+ url: str | list[str] | Path | list[Path],
478
+ mode: str = "rb",
479
+ **storage_options: dict,
480
+ ) -> str | list[str]:
481
+ """Open file(s) which can be resolved to local
482
+
483
+ For files which either are local, or get downloaded upon open
484
+ (e.g., by file caching)
485
+
486
+ Parameters
487
+ ----------
488
+ url: str or list(str)
489
+ mode: str
490
+ Must be read mode
491
+ storage_options:
492
+ passed on to the FS, or used by open_files (e.g., compression)
493
+ """
494
+ if "r" not in mode:
495
+ raise ValueError("Can only ensure local files when reading")
496
+ of = open_files(url, mode=mode, **storage_options)
497
+ if not getattr(of[0].fs, "local_file", False):
498
+ raise ValueError(
499
+ "open_local can only be used on a filesystem which"
500
+ " has attribute local_file=True"
501
+ )
502
+ with of as files:
503
+ paths = [f.name for f in files]
504
+ if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
505
+ return paths[0]
506
+ return paths
507
+
508
+
509
+ def get_compression(urlpath, compression):
510
+ if compression == "infer":
511
+ compression = infer_compression(urlpath)
512
+ if compression is not None and compression not in compr:
513
+ raise ValueError(f"Compression type {compression} not supported")
514
+ return compression
515
+
516
+
517
+ def split_protocol(urlpath):
518
+ """Return protocol, path pair"""
519
+ urlpath = stringify_path(urlpath)
520
+ if "://" in urlpath:
521
+ protocol, path = urlpath.split("://", 1)
522
+ if len(protocol) > 1:
523
+ # excludes Windows paths
524
+ return protocol, path
525
+ if urlpath.startswith("data:"):
526
+ return urlpath.split(":", 1)
527
+ return None, urlpath
528
+
529
+
530
+ def strip_protocol(urlpath):
531
+ """Return only path part of full URL, according to appropriate backend"""
532
+ protocol, _ = split_protocol(urlpath)
533
+ cls = get_filesystem_class(protocol)
534
+ return cls._strip_protocol(urlpath)
535
+
536
+
537
+ def expand_paths_if_needed(paths, mode, num, fs, name_function):
538
+ """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
539
+ in them (read mode).
540
+
541
+ :param paths: list of paths
542
+ mode: str
543
+ Mode in which to open files.
544
+ num: int
545
+ If opening in writing mode, number of files we expect to create.
546
+ fs: filesystem object
547
+ name_function: callable
548
+ If opening in writing mode, this callable is used to generate path
549
+ names. Names are generated for each partition by
550
+ ``urlpath.replace('*', name_function(partition_index))``.
551
+ :return: list of paths
552
+ """
553
+ expanded_paths = []
554
+ paths = list(paths)
555
+
556
+ if "w" in mode: # write mode
557
+ if sum([1 for p in paths if "*" in p]) > 1:
558
+ raise ValueError(
559
+ "When writing data, only one filename mask can be specified."
560
+ )
561
+ num = max(num, len(paths))
562
+
563
+ for curr_path in paths:
564
+ if "*" in curr_path:
565
+ # expand using name_function
566
+ expanded_paths.extend(_expand_paths(curr_path, name_function, num))
567
+ else:
568
+ expanded_paths.append(curr_path)
569
+ # if we generated more paths that asked for, trim the list
570
+ if len(expanded_paths) > num:
571
+ expanded_paths = expanded_paths[:num]
572
+
573
+ else: # read mode
574
+ for curr_path in paths:
575
+ if has_magic(curr_path):
576
+ # expand using glob
577
+ expanded_paths.extend(fs.glob(curr_path))
578
+ else:
579
+ expanded_paths.append(curr_path)
580
+
581
+ return expanded_paths
582
+
583
+
584
+ def get_fs_token_paths(
585
+ urlpath,
586
+ mode="rb",
587
+ num=1,
588
+ name_function=None,
589
+ storage_options=None,
590
+ protocol=None,
591
+ expand=True,
592
+ ):
593
+ """Filesystem, deterministic token, and paths from a urlpath and options.
594
+
595
+ Parameters
596
+ ----------
597
+ urlpath: string or iterable
598
+ Absolute or relative filepath, URL (may include protocols like
599
+ ``s3://``), or globstring pointing to data.
600
+ mode: str, optional
601
+ Mode in which to open files.
602
+ num: int, optional
603
+ If opening in writing mode, number of files we expect to create.
604
+ name_function: callable, optional
605
+ If opening in writing mode, this callable is used to generate path
606
+ names. Names are generated for each partition by
607
+ ``urlpath.replace('*', name_function(partition_index))``.
608
+ storage_options: dict, optional
609
+ Additional keywords to pass to the filesystem class.
610
+ protocol: str or None
611
+ To override the protocol specifier in the URL
612
+ expand: bool
613
+ Expand string paths for writing, assuming the path is a directory
614
+ """
615
+ if isinstance(urlpath, (list, tuple, set)):
616
+ if not urlpath:
617
+ raise ValueError("empty urlpath sequence")
618
+ urlpath0 = stringify_path(list(urlpath)[0])
619
+ else:
620
+ urlpath0 = stringify_path(urlpath)
621
+ storage_options = storage_options or {}
622
+ if protocol:
623
+ storage_options["protocol"] = protocol
624
+ chain = _un_chain(urlpath0, storage_options or {})
625
+ inkwargs = {}
626
+ # Reverse iterate the chain, creating a nested target_* structure
627
+ for i, ch in enumerate(reversed(chain)):
628
+ urls, nested_protocol, kw = ch
629
+ if i == len(chain) - 1:
630
+ inkwargs = dict(**kw, **inkwargs)
631
+ continue
632
+ inkwargs["target_options"] = dict(**kw, **inkwargs)
633
+ inkwargs["target_protocol"] = nested_protocol
634
+ inkwargs["fo"] = urls
635
+ paths, protocol, _ = chain[0]
636
+ fs = filesystem(protocol, **inkwargs)
637
+ if isinstance(urlpath, (list, tuple, set)):
638
+ pchains = [
639
+ _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
640
+ ]
641
+ if len({pc[1] for pc in pchains}) > 1:
642
+ raise ValueError("Protocol mismatch getting fs from %s", urlpath)
643
+ paths = [pc[0] for pc in pchains]
644
+ else:
645
+ paths = fs._strip_protocol(paths)
646
+ if isinstance(paths, (list, tuple, set)):
647
+ if expand:
648
+ paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
649
+ elif not isinstance(paths, list):
650
+ paths = list(paths)
651
+ else:
652
+ if "w" in mode and expand:
653
+ paths = _expand_paths(paths, name_function, num)
654
+ elif "x" in mode and expand:
655
+ paths = _expand_paths(paths, name_function, num)
656
+ elif "*" in paths:
657
+ paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
658
+ else:
659
+ paths = [paths]
660
+
661
+ return fs, fs._fs_token, paths
662
+
663
+
664
+ def _expand_paths(path, name_function, num):
665
+ if isinstance(path, str):
666
+ if path.count("*") > 1:
667
+ raise ValueError("Output path spec must contain exactly one '*'.")
668
+ elif "*" not in path:
669
+ path = os.path.join(path, "*.part")
670
+
671
+ if name_function is None:
672
+ name_function = build_name_function(num - 1)
673
+
674
+ paths = [path.replace("*", name_function(i)) for i in range(num)]
675
+ if paths != sorted(paths):
676
+ logger.warning(
677
+ "In order to preserve order between partitions"
678
+ " paths created with ``name_function`` should "
679
+ "sort to partition order"
680
+ )
681
+ elif isinstance(path, (tuple, list)):
682
+ assert len(path) == num
683
+ paths = list(path)
684
+ else:
685
+ raise ValueError(
686
+ "Path should be either\n"
687
+ "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
688
+ "2. A directory: 'foo/\n"
689
+ "3. A path with a '*' in it: 'foo.*.json'"
690
+ )
691
+ return paths
692
+
693
+
694
+ class PickleableTextIOWrapper(io.TextIOWrapper):
695
+ """TextIOWrapper cannot be pickled. This solves it.
696
+
697
+ Requires that ``buffer`` be pickleable, which all instances of
698
+ AbstractBufferedFile are.
699
+ """
700
+
701
+ def __init__(
702
+ self,
703
+ buffer,
704
+ encoding=None,
705
+ errors=None,
706
+ newline=None,
707
+ line_buffering=False,
708
+ write_through=False,
709
+ ):
710
+ self.args = buffer, encoding, errors, newline, line_buffering, write_through
711
+ super().__init__(*self.args)
712
+
713
+ def __reduce__(self):
714
+ return PickleableTextIOWrapper, self.args
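A brief, hedged sketch of the two entry points defined above: fsspec.open returns a serialisable OpenFile used as a context manager, and url_to_fs resolves a chained URL into a filesystem plus a plain path. The memory:// path and the simplecache:: chaining are example choices, not requirements.

    import fsspec
    from fsspec.core import url_to_fs

    with fsspec.open("memory://example/data.txt", "wt", encoding="utf8") as f:
        f.write("hello")

    fs, path = url_to_fs("simplecache::memory://example/data.txt")
    print(type(fs).__name__, path)  # a caching filesystem wrapping memory://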
venv/lib/python3.10/site-packages/fsspec/dircache.py ADDED
@@ -0,0 +1,98 @@
1
+ import time
2
+ from collections.abc import MutableMapping
3
+ from functools import lru_cache
4
+
5
+
6
+ class DirCache(MutableMapping):
7
+ """
8
+ Caching of directory listings, in a structure like::
9
+
10
+ {"path0": [
11
+ {"name": "path0/file0",
12
+ "size": 123,
13
+ "type": "file",
14
+ ...
15
+ },
16
+ {"name": "path0/file1",
17
+ },
18
+ ...
19
+ ],
20
+ "path1": [...]
21
+ }
22
+
23
+ Parameters to this class control listing expiry or indeed turn
24
+ caching off
25
+ """
26
+
27
+ def __init__(
28
+ self,
29
+ use_listings_cache=True,
30
+ listings_expiry_time=None,
31
+ max_paths=None,
32
+ **kwargs,
33
+ ):
34
+ """
35
+
36
+ Parameters
37
+ ----------
38
+ use_listings_cache: bool
39
+ If False, this cache never returns items, but always reports KeyError,
40
+ and setting items has no effect
41
+ listings_expiry_time: int or float (optional)
42
+ Time in seconds that a listing is considered valid. If None,
43
+ listings do not expire.
44
+ max_paths: int (optional)
45
+ The number of most recent listings that are considered valid; 'recent'
46
+ refers to when the entry was set.
47
+ """
48
+ self._cache = {}
49
+ self._times = {}
50
+ if max_paths:
51
+ self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
52
+ self.use_listings_cache = use_listings_cache
53
+ self.listings_expiry_time = listings_expiry_time
54
+ self.max_paths = max_paths
55
+
56
+ def __getitem__(self, item):
57
+ if self.listings_expiry_time is not None:
58
+ if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
59
+ del self._cache[item]
60
+ if self.max_paths:
61
+ self._q(item)
62
+ return self._cache[item] # maybe raises KeyError
63
+
64
+ def clear(self):
65
+ self._cache.clear()
66
+
67
+ def __len__(self):
68
+ return len(self._cache)
69
+
70
+ def __contains__(self, item):
71
+ try:
72
+ self[item]
73
+ return True
74
+ except KeyError:
75
+ return False
76
+
77
+ def __setitem__(self, key, value):
78
+ if not self.use_listings_cache:
79
+ return
80
+ if self.max_paths:
81
+ self._q(key)
82
+ self._cache[key] = value
83
+ if self.listings_expiry_time is not None:
84
+ self._times[key] = time.time()
85
+
86
+ def __delitem__(self, key):
87
+ del self._cache[key]
88
+
89
+ def __iter__(self):
90
+ entries = list(self._cache)
91
+
92
+ return (k for k in entries if k in self)
93
+
94
+ def __reduce__(self):
95
+ return (
96
+ DirCache,
97
+ (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
98
+ )
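A hedged illustration of the expiry behaviour documented above; the listing dict is made up for the example.

import time
from fsspec.dircache import DirCache

cache = DirCache(listings_expiry_time=0.2, max_paths=100)
cache["/data"] = [{"name": "/data/a.csv", "size": 10, "type": "file"}]

print("/data" in cache)   # True while the entry is fresh
time.sleep(0.3)
print("/data" in cache)   # False: the expired listing is evicted on lookup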
venv/lib/python3.10/site-packages/fsspec/exceptions.py ADDED
@@ -0,0 +1,17 @@
1
+ """
2
+ fsspec user-defined exception classes
3
+ """
4
+ import asyncio
5
+
6
+
7
+ class BlocksizeMismatchError(ValueError):
8
+ """
9
+ Raised when a cached file is opened with a different blocksize than it was
10
+ written with
11
+ """
12
+
13
+
14
+ class FSTimeoutError(asyncio.TimeoutError):
15
+ """
16
+ Raised when an fsspec operation times out
17
+ """
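A small usage sketch (not part of the diff): both classes are ordinary Python exceptions, so they are caught like any other; the raise below is only a stand-in for a slow remote call.

import asyncio
from fsspec.exceptions import FSTimeoutError

# FSTimeoutError subclasses asyncio.TimeoutError, so either except clause works
try:
    raise FSTimeoutError("stand-in for a slow remote operation")
except asyncio.TimeoutError as e:
    print("timed out:", e)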
venv/lib/python3.10/site-packages/fsspec/fuse.py ADDED
@@ -0,0 +1,324 @@
1
+ import argparse
2
+ import logging
3
+ import os
4
+ import stat
5
+ import threading
6
+ import time
7
+ from errno import EIO, ENOENT
8
+
9
+ from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
10
+
11
+ from fsspec import __version__
12
+ from fsspec.core import url_to_fs
13
+
14
+ logger = logging.getLogger("fsspec.fuse")
15
+
16
+
17
+ class FUSEr(Operations):
18
+ def __init__(self, fs, path, ready_file=False):
19
+ self.fs = fs
20
+ self.cache = {}
21
+ self.root = path.rstrip("/") + "/"
22
+ self.counter = 0
23
+ logger.info("Starting FUSE at %s", path)
24
+ self._ready_file = ready_file
25
+
26
+ def getattr(self, path, fh=None):
27
+ logger.debug("getattr %s", path)
28
+ if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
29
+ return {"type": "file", "st_size": 5}
30
+
31
+ path = "".join([self.root, path.lstrip("/")]).rstrip("/")
32
+ try:
33
+ info = self.fs.info(path)
34
+ except FileNotFoundError:
35
+ raise FuseOSError(ENOENT)
36
+
37
+ data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
38
+ perm = info.get("mode", 0o777)
39
+
40
+ if info["type"] != "file":
41
+ data["st_mode"] = stat.S_IFDIR | perm
42
+ data["st_size"] = 0
43
+ data["st_blksize"] = 0
44
+ else:
45
+ data["st_mode"] = stat.S_IFREG | perm
46
+ data["st_size"] = info["size"]
47
+ data["st_blksize"] = 5 * 2**20
48
+ data["st_nlink"] = 1
49
+ data["st_atime"] = info["atime"] if "atime" in info else time.time()
50
+ data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
51
+ data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
52
+ return data
53
+
54
+ def readdir(self, path, fh):
55
+ logger.debug("readdir %s", path)
56
+ path = "".join([self.root, path.lstrip("/")])
57
+ files = self.fs.ls(path, False)
58
+ files = [os.path.basename(f.rstrip("/")) for f in files]
59
+ return [".", ".."] + files
60
+
61
+ def mkdir(self, path, mode):
62
+ path = "".join([self.root, path.lstrip("/")])
63
+ self.fs.mkdir(path)
64
+ return 0
65
+
66
+ def rmdir(self, path):
67
+ path = "".join([self.root, path.lstrip("/")])
68
+ self.fs.rmdir(path)
69
+ return 0
70
+
71
+ def read(self, path, size, offset, fh):
72
+ logger.debug("read %s", (path, size, offset))
73
+ if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
74
+ # status indicator
75
+ return b"ready"
76
+
77
+ f = self.cache[fh]
78
+ f.seek(offset)
79
+ out = f.read(size)
80
+ return out
81
+
82
+ def write(self, path, data, offset, fh):
83
+ logger.debug("write %s", (path, offset))
84
+ f = self.cache[fh]
85
+ f.seek(offset)
86
+ f.write(data)
87
+ return len(data)
88
+
89
+ def create(self, path, flags, fi=None):
90
+ logger.debug("create %s", (path, flags))
91
+ fn = "".join([self.root, path.lstrip("/")])
92
+ self.fs.touch(fn) # OS will want to get attributes immediately
93
+ f = self.fs.open(fn, "wb")
94
+ self.cache[self.counter] = f
95
+ self.counter += 1
96
+ return self.counter - 1
97
+
98
+ def open(self, path, flags):
99
+ logger.debug("open %s", (path, flags))
100
+ fn = "".join([self.root, path.lstrip("/")])
101
+ if flags % 2 == 0:
102
+ # read
103
+ mode = "rb"
104
+ else:
105
+ # write/create
106
+ mode = "wb"
107
+ self.cache[self.counter] = self.fs.open(fn, mode)
108
+ self.counter += 1
109
+ return self.counter - 1
110
+
111
+ def truncate(self, path, length, fh=None):
112
+ fn = "".join([self.root, path.lstrip("/")])
113
+ if length != 0:
114
+ raise NotImplementedError
115
+ # maybe should be no-op since open with write sets size to zero anyway
116
+ self.fs.touch(fn)
117
+
118
+ def unlink(self, path):
119
+ fn = "".join([self.root, path.lstrip("/")])
120
+ try:
121
+ self.fs.rm(fn, False)
122
+ except (OSError, FileNotFoundError):
123
+ raise FuseOSError(EIO)
124
+
125
+ def release(self, path, fh):
126
+ try:
127
+ if fh in self.cache:
128
+ f = self.cache[fh]
129
+ f.close()
130
+ self.cache.pop(fh)
131
+ except Exception as e:
132
+ print(e)
133
+ return 0
134
+
135
+ def chmod(self, path, mode):
136
+ if hasattr(self.fs, "chmod"):
137
+ path = "".join([self.root, path.lstrip("/")])
138
+ return self.fs.chmod(path, mode)
139
+ raise NotImplementedError
140
+
141
+
142
+ def run(
143
+ fs,
144
+ path,
145
+ mount_point,
146
+ foreground=True,
147
+ threads=False,
148
+ ready_file=False,
149
+ ops_class=FUSEr,
150
+ ):
151
+ """Mount stuff in a local directory
152
+
153
+ This uses fusepy to make it appear as if a given path on an fsspec
154
+ instance is in fact resident within the local file-system.
155
+
156
+ This requires that fusepy be installed, and that FUSE be available on
157
+ the system (typically requiring a package to be installed with
158
+ apt, yum, brew, etc.).
159
+
160
+ Parameters
161
+ ----------
162
+ fs: file-system instance
163
+ From one of the compatible implementations
164
+ path: str
165
+ Location on that file-system to regard as the root directory to
166
+ mount. Note that you typically should include the terminating "/"
167
+ character.
168
+ mount_point: str
169
+ An empty directory on the local file-system where the contents of
170
+ the remote path will appear.
171
+ foreground: bool
172
+ Whether or not calling this function will block. Operation will
173
+ typically be more stable if True.
174
+ threads: bool
175
+ Whether or not to create threads when responding to file operations
176
+ within the mounted directory. Operation will typically be more
177
+ stable if False.
178
+ ready_file: bool
179
+ Whether the FUSE process is ready. The ``.fuse_ready`` file will
180
+ exist in the ``mount_point`` directory if True. Debugging purpose.
181
+ ops_class: FUSEr or Subclass of FUSEr
182
+ To override the default behavior of FUSEr. For Example, logging
183
+ to file.
184
+
185
+ """
186
+ func = lambda: FUSE(
187
+ ops_class(fs, path, ready_file=ready_file),
188
+ mount_point,
189
+ nothreads=not threads,
190
+ foreground=foreground,
191
+ )
192
+ if not foreground:
193
+ th = threading.Thread(target=func)
194
+ th.daemon = True
195
+ th.start()
196
+ return th
197
+ else: # pragma: no cover
198
+ try:
199
+ func()
200
+ except KeyboardInterrupt:
201
+ pass
202
+
203
+
204
+ def main(args):
205
+ """Mount filesystem from chained URL to MOUNT_POINT.
206
+
207
+ Examples:
208
+
209
+ python3 -m fsspec.fuse memory /usr/share /tmp/mem
210
+
211
+ python3 -m fsspec.fuse local /tmp/source /tmp/local \\
212
+ -l /tmp/fsspecfuse.log
213
+
214
+ You can also mount chained-URLs and use special settings:
215
+
216
+ python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
217
+ / /tmp/zip \\
218
+ -o 'filecache-cache_storage=/tmp/simplecache'
219
+
220
+ You can specify the type of the setting by using `[int]` or `[bool]`,
221
+ (`true`, `yes`, `1` represents the Boolean value `True`):
222
+
223
+ python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
224
+ /historic/packages/RPMS /tmp/ftp \\
225
+ -o 'simplecache-cache_storage=/tmp/simplecache' \\
226
+ -o 'simplecache-check_files=false[bool]' \\
227
+ -o 'ftp-listings_expiry_time=60[int]' \\
228
+ -o 'ftp-username=anonymous' \\
229
+ -o 'ftp-password=xieyanbo'
230
+ """
231
+
232
+ class RawDescriptionArgumentParser(argparse.ArgumentParser):
233
+ def format_help(self):
234
+ usage = super().format_help()
235
+ parts = usage.split("\n\n")
236
+ parts[1] = self.description.rstrip()
237
+ return "\n\n".join(parts)
238
+
239
+ parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
240
+ parser.add_argument("--version", action="version", version=__version__)
241
+ parser.add_argument("url", type=str, help="fs url")
242
+ parser.add_argument("source_path", type=str, help="source directory in fs")
243
+ parser.add_argument("mount_point", type=str, help="local directory")
244
+ parser.add_argument(
245
+ "-o",
246
+ "--option",
247
+ action="append",
248
+ help="Any options of protocol included in the chained URL",
249
+ )
250
+ parser.add_argument(
251
+ "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
252
+ )
253
+ parser.add_argument(
254
+ "-f",
255
+ "--foreground",
256
+ action="store_false",
257
+ help="Running in foreground or not (Default: False)",
258
+ )
259
+ parser.add_argument(
260
+ "-t",
261
+ "--threads",
262
+ action="store_false",
263
+ help="Running with threads support (Default: False)",
264
+ )
265
+ parser.add_argument(
266
+ "-r",
267
+ "--ready-file",
268
+ action="store_false",
269
+ help="The `.fuse_ready` file will exist after FUSE is ready. "
270
+ "(Debugging purpose, Default: False)",
271
+ )
272
+ args = parser.parse_args(args)
273
+
274
+ kwargs = {}
275
+ for item in args.option or []:
276
+ key, sep, value = item.partition("=")
277
+ if not sep:
278
+ parser.error(message=f"Wrong option: {item!r}")
279
+ val = value.lower()
280
+ if val.endswith("[int]"):
281
+ value = int(value[: -len("[int]")])
282
+ elif val.endswith("[bool]"):
283
+ value = val[: -len("[bool]")] in ["1", "yes", "true"]
284
+
285
+ if "-" in key:
286
+ fs_name, setting_name = key.split("-", 1)
287
+ if fs_name in kwargs:
288
+ kwargs[fs_name][setting_name] = value
289
+ else:
290
+ kwargs[fs_name] = {setting_name: value}
291
+ else:
292
+ kwargs[key] = value
293
+
294
+ if args.log_file:
295
+ logging.basicConfig(
296
+ level=logging.DEBUG,
297
+ filename=args.log_file,
298
+ format="%(asctime)s %(message)s",
299
+ )
300
+
301
+ class LoggingFUSEr(FUSEr, LoggingMixIn):
302
+ pass
303
+
304
+ fuser = LoggingFUSEr
305
+ else:
306
+ fuser = FUSEr
307
+
308
+ fs, url_path = url_to_fs(args.url, **kwargs)
309
+ logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
310
+ run(
311
+ fs,
312
+ args.source_path,
313
+ args.mount_point,
314
+ foreground=args.foreground,
315
+ threads=args.threads,
316
+ ready_file=args.ready_file,
317
+ ops_class=fuser,
318
+ )
319
+
320
+
321
+ if __name__ == "__main__":
322
+ import sys
323
+
324
+ main(sys.argv[1:])
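A rough sketch of mounting an in-memory filesystem with run(); this assumes the optional fusepy package and a working FUSE install, and that the mount point /tmp/mnt already exists and is empty.

import fsspec
from fsspec.fuse import run

fs = fsspec.filesystem("memory")
fs.pipe("/demo/hello.txt", b"hello from fsspec\n")

# foreground=False returns the mounting daemon thread instead of blocking
th = run(fs, "/demo", "/tmp/mnt", foreground=False, threads=False)

The same mount can be made from the command line, e.g. python3 -m fsspec.fuse memory /demo /tmp/mnt, as shown in the docstring of main() above.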
venv/lib/python3.10/site-packages/fsspec/generic.py ADDED
@@ -0,0 +1,408 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import uuid
8
+ from typing import Optional
9
+
10
+ from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper
11
+ from .callbacks import DEFAULT_CALLBACK
12
+ from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs
13
+
14
+ _generic_fs = {}
15
+ logger = logging.getLogger("fsspec.generic")
16
+
17
+
18
+ def set_generic_fs(protocol, **storage_options):
19
+ _generic_fs[protocol] = filesystem(protocol, **storage_options)
20
+
21
+
22
+ default_method = "default"
23
+
24
+
25
+ def _resolve_fs(url, method=None, protocol=None, storage_options=None):
26
+ """Pick instance of backend FS"""
27
+ method = method or default_method
28
+ protocol = protocol or split_protocol(url)[0]
29
+ storage_options = storage_options or {}
30
+ if method == "default":
31
+ return filesystem(protocol)
32
+ if method == "generic":
33
+ return _generic_fs[protocol]
34
+ if method == "current":
35
+ cls = get_filesystem_class(protocol)
36
+ return cls.current()
37
+ if method == "options":
38
+ fs, _ = url_to_fs(url, **storage_options.get(protocol, {}))
39
+ return fs
40
+ raise ValueError(f"Unknown FS resolution method: {method}")
41
+
42
+
43
+ def rsync(
44
+ source,
45
+ destination,
46
+ delete_missing=False,
47
+ source_field="size",
48
+ dest_field="size",
49
+ update_cond="different",
50
+ inst_kwargs=None,
51
+ fs=None,
52
+ **kwargs,
53
+ ):
54
+ """Sync files between two directory trees
55
+
56
+ (experimental)
57
+
58
+ Parameters
59
+ ----------
60
+ source: str
61
+ Root of the directory tree to take files from. This must be a directory, but
62
+ do not include any terminating "/" character
63
+ destination: str
64
+ Root path to copy into. The contents of this location should be
65
+ identical to the contents of ``source`` when done. This will be made a
66
+ directory, and the terminal "/" should not be included.
67
+ delete_missing: bool
68
+ If there are paths in the destination that don't exist in the
69
+ source and this is True, delete them. Otherwise, leave them alone.
70
+ source_field: str | callable
71
+ If ``update_field`` is "different", this is the key in the info
72
+ of source files to consider for difference. Maybe a function of the
73
+ info dict.
74
+ dest_field: str | callable
75
+ If ``update_field`` is "different", this is the key in the info
76
+ of destination files to consider for difference. May be a function of
77
+ the info dict.
78
+ update_cond: "different"|"always"|"never"
79
+ If "always", every file is copied, regardless of whether it exists in
80
+ the destination. If "never", files that exist in the destination are
81
+ not copied again. If "different" (default), only copy if the info
82
+ fields given by ``source_field`` and ``dest_field`` (usually "size")
83
+ are different. Other comparisons may be added in the future.
84
+ inst_kwargs: dict|None
85
+ If ``fs`` is None, use this set of keyword arguments to make a
86
+ GenericFileSystem instance
87
+ fs: GenericFileSystem|None
88
+ Instance to use if explicitly given. The instance defines how to
89
+ to make downstream file system instances from paths.
90
+
91
+ Returns
92
+ -------
93
+ dict of the copy operations that were performed, {source: destination}
94
+ """
95
+ fs = fs or GenericFileSystem(**(inst_kwargs or {}))
96
+ source = fs._strip_protocol(source)
97
+ destination = fs._strip_protocol(destination)
98
+ allfiles = fs.find(source, withdirs=True, detail=True)
99
+ if not fs.isdir(source):
100
+ raise ValueError("Can only rsync on a directory")
101
+ otherfiles = fs.find(destination, withdirs=True, detail=True)
102
+ dirs = [
103
+ a
104
+ for a, v in allfiles.items()
105
+ if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
106
+ ]
107
+ logger.debug(f"{len(dirs)} directories to create")
108
+ if dirs:
109
+ fs.make_many_dirs(
110
+ [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
111
+ )
112
+ allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
113
+ logger.debug(f"{len(allfiles)} files to consider for copy")
114
+ to_delete = [
115
+ o
116
+ for o, v in otherfiles.items()
117
+ if o.replace(destination, source) not in allfiles and v["type"] == "file"
118
+ ]
119
+ for k, v in allfiles.copy().items():
120
+ otherfile = k.replace(source, destination)
121
+ if otherfile in otherfiles:
122
+ if update_cond == "always":
123
+ allfiles[k] = otherfile
124
+ elif update_cond == "different":
125
+ inf1 = source_field(v) if callable(source_field) else v[source_field]
126
+ v2 = otherfiles[otherfile]
127
+ inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
128
+ if inf1 != inf2:
129
+ # details mismatch, make copy
130
+ allfiles[k] = otherfile
131
+ else:
132
+ # details match, don't copy
133
+ allfiles.pop(k)
134
+ else:
135
+ # file not in target yet
136
+ allfiles[k] = otherfile
137
+ logger.debug(f"{len(allfiles)} files to copy")
138
+ if allfiles:
139
+ source_files, target_files = zip(*allfiles.items())
140
+ fs.cp(source_files, target_files, **kwargs)
141
+ logger.debug(f"{len(to_delete)} files to delete")
142
+ if delete_missing:
143
+ fs.rm(to_delete)
144
+ return allfiles
145
+
146
+
147
+ class GenericFileSystem(AsyncFileSystem):
148
+ """Wrapper over all other FS types
149
+
150
+ <experimental!>
151
+
152
+ This implementation is a single unified interface to be able to run FS operations
153
+ over generic URLs, and dispatch to the specific implementations using the URL
154
+ protocol prefix.
155
+
156
+ Note: instances of this FS are always async, even if you never use it with any async
157
+ backend.
158
+ """
159
+
160
+ protocol = "generic" # there is no real reason to ever use a protocol with this FS
161
+
162
+ def __init__(self, default_method="default", **kwargs):
163
+ """
164
+
165
+ Parameters
166
+ ----------
167
+ default_method: str (optional)
168
+ Defines how to configure backend FS instances. Options are:
169
+ - "default": instantiate like FSClass(), with no
170
+ extra arguments; this is the default instance of that FS, and can be
171
+ configured via the config system
172
+ - "generic": takes instances from the `_generic_fs` dict in this module,
173
+ which you must populate before use. Keys are by protocol
174
+ - "current": takes the most recently instantiated version of each FS
175
+ """
176
+ self.method = default_method
177
+ super().__init__(**kwargs)
178
+
179
+ def _parent(self, path):
180
+ fs = _resolve_fs(path, self.method)
181
+ return fs.unstrip_protocol(fs._parent(path))
182
+
183
+ def _strip_protocol(self, path):
184
+ # normalization only
185
+ fs = _resolve_fs(path, self.method)
186
+ return fs.unstrip_protocol(fs._strip_protocol(path))
187
+
188
+ async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
189
+ fs = _resolve_fs(path, self.method)
190
+ if fs.async_impl:
191
+ out = await fs._find(
192
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
193
+ )
194
+ else:
195
+ out = fs.find(
196
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
197
+ )
198
+ result = {}
199
+ for k, v in out.items():
200
+ name = fs.unstrip_protocol(k)
201
+ v["name"] = name
202
+ result[name] = v
203
+ if detail:
204
+ return result
205
+ return list(result)
206
+
207
+ async def _info(self, url, **kwargs):
208
+ fs = _resolve_fs(url, self.method)
209
+ if fs.async_impl:
210
+ out = await fs._info(url, **kwargs)
211
+ else:
212
+ out = fs.info(url, **kwargs)
213
+ out["name"] = fs.unstrip_protocol(out["name"])
214
+ return out
215
+
216
+ async def _ls(
217
+ self,
218
+ url,
219
+ detail=True,
220
+ **kwargs,
221
+ ):
222
+ fs = _resolve_fs(url, self.method)
223
+ if fs.async_impl:
224
+ out = await fs._ls(url, detail=True, **kwargs)
225
+ else:
226
+ out = fs.ls(url, detail=True, **kwargs)
227
+ for o in out:
228
+ o["name"] = fs.unstrip_protocol(o["name"])
229
+ if detail:
230
+ return out
231
+ else:
232
+ return [o["name"] for o in out]
233
+
234
+ async def _cat_file(
235
+ self,
236
+ url,
237
+ **kwargs,
238
+ ):
239
+ fs = _resolve_fs(url, self.method)
240
+ if fs.async_impl:
241
+ return await fs._cat_file(url, **kwargs)
242
+ else:
243
+ return fs.cat_file(url, **kwargs)
244
+
245
+ async def _pipe_file(
246
+ self,
247
+ path,
248
+ value,
249
+ **kwargs,
250
+ ):
251
+ fs = _resolve_fs(path, self.method)
252
+ if fs.async_impl:
253
+ return await fs._pipe_file(path, value, **kwargs)
254
+ else:
255
+ return fs.pipe_file(path, value, **kwargs)
256
+
257
+ async def _rm(self, url, **kwargs):
258
+ urls = url
259
+ if isinstance(urls, str):
260
+ urls = [urls]
261
+ fs = _resolve_fs(urls[0], self.method)
262
+ if fs.async_impl:
263
+ await fs._rm(urls, **kwargs)
264
+ else:
265
+ fs.rm(url, **kwargs)
266
+
267
+ async def _makedirs(self, path, exist_ok=False):
268
+ logger.debug("Make dir %s", path)
269
+ fs = _resolve_fs(path, self.method)
270
+ if fs.async_impl:
271
+ await fs._makedirs(path, exist_ok=exist_ok)
272
+ else:
273
+ fs.makedirs(path, exist_ok=exist_ok)
274
+
275
+ def rsync(self, source, destination, **kwargs):
276
+ """Sync files between two directory trees
277
+
278
+ See `func:rsync` for more details.
279
+ """
280
+ rsync(source, destination, fs=self, **kwargs)
281
+
282
+ async def _cp_file(
283
+ self,
284
+ url,
285
+ url2,
286
+ blocksize=2**20,
287
+ callback=DEFAULT_CALLBACK,
288
+ **kwargs,
289
+ ):
290
+ fs = _resolve_fs(url, self.method)
291
+ fs2 = _resolve_fs(url2, self.method)
292
+ if fs is fs2:
293
+ # pure remote
294
+ if fs.async_impl:
295
+ return await fs._cp_file(url, url2, **kwargs)
296
+ else:
297
+ return fs.cp_file(url, url2, **kwargs)
298
+ kw = {"blocksize": 0, "cache_type": "none"}
299
+ try:
300
+ f1 = (
301
+ await fs.open_async(url, "rb")
302
+ if hasattr(fs, "open_async")
303
+ else fs.open(url, "rb", **kw)
304
+ )
305
+ callback.set_size(await maybe_await(f1.size))
306
+ f2 = (
307
+ await fs2.open_async(url2, "wb")
308
+ if hasattr(fs2, "open_async")
309
+ else fs2.open(url2, "wb", **kw)
310
+ )
311
+ while f1.size is None or f2.tell() < f1.size:
312
+ data = await maybe_await(f1.read(blocksize))
313
+ if f1.size is None and not data:
314
+ break
315
+ await maybe_await(f2.write(data))
316
+ callback.absolute_update(f2.tell())
317
+ finally:
318
+ try:
319
+ await maybe_await(f2.close())
320
+ await maybe_await(f1.close())
321
+ except NameError:
322
+ # fail while opening f1 or f2
323
+ pass
324
+
325
+ async def _make_many_dirs(self, urls, exist_ok=True):
326
+ fs = _resolve_fs(urls[0], self.method)
327
+ if fs.async_impl:
328
+ coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
329
+ await _run_coros_in_chunks(coros)
330
+ else:
331
+ for u in urls:
332
+ fs.makedirs(u, exist_ok=exist_ok)
333
+
334
+ make_many_dirs = sync_wrapper(_make_many_dirs)
335
+
336
+ async def _copy(
337
+ self,
338
+ path1: list[str],
339
+ path2: list[str],
340
+ recursive: bool = False,
341
+ on_error: str = "ignore",
342
+ maxdepth: Optional[int] = None,
343
+ batch_size: Optional[int] = None,
344
+ tempdir: Optional[str] = None,
345
+ **kwargs,
346
+ ):
347
+ if recursive:
348
+ raise NotImplementedError
349
+ fs = _resolve_fs(path1[0], self.method)
350
+ fs2 = _resolve_fs(path2[0], self.method)
351
+ # not expanding paths atm., assume call is from rsync()
352
+ if fs is fs2:
353
+ # pure remote
354
+ if fs.async_impl:
355
+ return await fs._copy(path1, path2, **kwargs)
356
+ else:
357
+ return fs.copy(path1, path2, **kwargs)
358
+ await copy_file_op(
359
+ fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
360
+ )
361
+
362
+
363
+ async def copy_file_op(
364
+ fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
365
+ ):
366
+ import tempfile
367
+
368
+ tempdir = tempdir or tempfile.mkdtemp()
369
+ try:
370
+ coros = [
371
+ _copy_file_op(
372
+ fs1,
373
+ u1,
374
+ fs2,
375
+ u2,
376
+ os.path.join(tempdir, uuid.uuid4().hex),
377
+ on_error=on_error,
378
+ )
379
+ for u1, u2 in zip(url1, url2)
380
+ ]
381
+ await _run_coros_in_chunks(coros, batch_size=batch_size)
382
+ finally:
383
+ shutil.rmtree(tempdir)
384
+
385
+
386
+ async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
387
+ ex = () if on_error == "raise" else Exception
388
+ logger.debug("Copy %s -> %s", url1, url2)
389
+ try:
390
+ if fs1.async_impl:
391
+ await fs1._get_file(url1, local)
392
+ else:
393
+ fs1.get_file(url1, local)
394
+ if fs2.async_impl:
395
+ await fs2._put_file(local, url2)
396
+ else:
397
+ fs2.put_file(local, url2)
398
+ os.unlink(local)
399
+ logger.debug("Copy %s -> %s; done", url1, url2)
400
+ except ex as e:
401
+ logger.debug("ignoring cp exception for %s: %s", url1, e)
402
+
403
+
404
+ async def maybe_await(cor):
405
+ if inspect.iscoroutine(cor):
406
+ return await cor
407
+ else:
408
+ return cor
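A hedged example of the rsync helper above, using two in-memory trees so it runs without any remote credentials; the paths are invented for the example.

import fsspec
from fsspec.generic import rsync

mem = fsspec.filesystem("memory")
mem.pipe("/src/a.txt", b"alpha")
mem.pipe("/src/sub/b.txt", b"beta")

# Copies everything under memory://src into memory://dst and returns the
# {source: destination} mapping of the files it actually copied.
copied = rsync("memory://src", "memory://dst")
print(copied)
print(mem.find("/dst"))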
venv/lib/python3.10/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,414 @@
1
+ import ast
2
+ import contextlib
3
+ import logging
4
+ import os
5
+ import re
6
+ from typing import ClassVar, Sequence
7
+
8
+ import panel as pn
9
+
10
+ from .core import OpenFile, get_filesystem_class, split_protocol
11
+ from .registry import known_implementations
12
+
13
+ pn.extension()
14
+ logger = logging.getLogger("fsspec.gui")
15
+
16
+
17
+ class SigSlot:
18
+ """Signal-slot mixin, for Panel event passing
19
+
20
+ Include this class in a widget manager's superclasses to be able to
21
+ register events and callbacks on Panel widgets managed by that class.
22
+
23
+ The method ``_register`` should be called as widgets are added, and external
24
+ code should call ``connect`` to associate callbacks.
25
+
26
+ By default, all signals emit a DEBUG logging statement.
27
+ """
28
+
29
+ # names of signals that this class may emit each of which must be
30
+ # set by _register for any new instance
31
+ signals: ClassVar[Sequence[str]] = []
32
+ # names of actions that this class may respond to
33
+ slots: ClassVar[Sequence[str]] = []
34
+
35
+ # each of which must be a method name
36
+
37
+ def __init__(self):
38
+ self._ignoring_events = False
39
+ self._sigs = {}
40
+ self._map = {}
41
+ self._setup()
42
+
43
+ def _setup(self):
44
+ """Create GUI elements and register signals"""
45
+ self.panel = pn.pane.PaneBase()
46
+ # no signals to set up in the base class
47
+
48
+ def _register(
49
+ self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
50
+ ):
51
+ """Watch the given attribute of a widget and assign it a named event
52
+
53
+ This is normally called at the time a widget is instantiated, in the
54
+ class which owns it.
55
+
56
+ Parameters
57
+ ----------
58
+ widget : pn.layout.Panel or None
59
+ Widget to watch. If None, an anonymous signal not associated with
60
+ any widget.
61
+ name : str
62
+ Name of this event
63
+ thing : str
64
+ Attribute of the given widget to watch
65
+ log_level : int
66
+ When the signal is triggered, a logging event of the given level
67
+ will be fired in the fsspec.gui logger.
68
+ auto : bool
69
+ If True, automatically connects with a method in this class of the
70
+ same name.
71
+ """
72
+ if name not in self.signals:
73
+ raise ValueError(f"Attempt to assign an undeclared signal: {name}")
74
+ self._sigs[name] = {
75
+ "widget": widget,
76
+ "callbacks": [],
77
+ "thing": thing,
78
+ "log": log_level,
79
+ }
80
+ wn = "-".join(
81
+ [
82
+ getattr(widget, "name", str(widget)) if widget is not None else "none",
83
+ thing,
84
+ ]
85
+ )
86
+ self._map[wn] = name
87
+ if widget is not None:
88
+ widget.param.watch(self._signal, thing, onlychanged=True)
89
+ if auto and hasattr(self, name):
90
+ self.connect(name, getattr(self, name))
91
+
92
+ def _repr_mimebundle_(self, *args, **kwargs):
93
+ """Display in a notebook or a server"""
94
+ try:
95
+ return self.panel._repr_mimebundle_(*args, **kwargs)
96
+ except (ValueError, AttributeError):
97
+ raise NotImplementedError("Panel does not seem to be set up properly")
98
+
99
+ def connect(self, signal, slot):
100
+ """Associate call back with given event
101
+
102
+ The callback must be a function which takes the "new" value of the
103
+ watched attribute as the only parameter. If the callback returns False,
104
+ this cancels any further processing of the given event.
105
+
106
+ Alternatively, the callback can be a string, in which case it means
107
+ emitting the correspondingly-named event (i.e., connect to self)
108
+ """
109
+ self._sigs[signal]["callbacks"].append(slot)
110
+
111
+ def _signal(self, event):
112
+ """This is called by an action on a widget
113
+
114
+ Within an self.ignore_events context, nothing happens.
115
+
116
+ Tests can execute this method by directly changing the values of
117
+ widget components.
118
+ """
119
+ if not self._ignoring_events:
120
+ wn = "-".join([event.obj.name, event.name])
121
+ if wn in self._map and self._map[wn] in self._sigs:
122
+ self._emit(self._map[wn], event.new)
123
+
124
+ @contextlib.contextmanager
125
+ def ignore_events(self):
126
+ """Temporarily turn off events processing in this instance
127
+
128
+ (does not propagate to children)
129
+ """
130
+ self._ignoring_events = True
131
+ try:
132
+ yield
133
+ finally:
134
+ self._ignoring_events = False
135
+
136
+ def _emit(self, sig, value=None):
137
+ """An event happened, call its callbacks
138
+
139
+ This method can be used in tests to simulate message passing without
140
+ directly changing visual elements.
141
+
142
+ Calling of callbacks will halt whenever one returns False.
143
+ """
144
+ logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
145
+ for callback in self._sigs[sig]["callbacks"]:
146
+ if isinstance(callback, str):
147
+ self._emit(callback)
148
+ else:
149
+ try:
150
+ # running callbacks should not break the interface
151
+ ret = callback(value)
152
+ if ret is False:
153
+ break
154
+ except Exception as e:
155
+ logger.exception(
156
+ "Exception (%s) while executing callback for signal: %s",
157
+ e,
158
+ sig,
159
+ )
160
+
161
+ def show(self, threads=False):
162
+ """Open a new browser tab and display this instance's interface"""
163
+ self.panel.show(threads=threads, verbose=False)
164
+ return self
165
+
166
+
167
+ class SingleSelect(SigSlot):
168
+ """A multiselect which only allows you to select one item for an event"""
169
+
170
+ signals = ["_selected", "selected"] # the first is internal
171
+ slots = ["set_options", "set_selection", "add", "clear", "select"]
172
+
173
+ def __init__(self, **kwargs):
174
+ self.kwargs = kwargs
175
+ super().__init__()
176
+
177
+ def _setup(self):
178
+ self.panel = pn.widgets.MultiSelect(**self.kwargs)
179
+ self._register(self.panel, "_selected", "value")
180
+ self._register(None, "selected")
181
+ self.connect("_selected", self.select_one)
182
+
183
+ def _signal(self, *args, **kwargs):
184
+ super()._signal(*args, **kwargs)
185
+
186
+ def select_one(self, *_):
187
+ with self.ignore_events():
188
+ val = [self.panel.value[-1]] if self.panel.value else []
189
+ self.panel.value = val
190
+ self._emit("selected", self.panel.value)
191
+
192
+ def set_options(self, options):
193
+ self.panel.options = options
194
+
195
+ def clear(self):
196
+ self.panel.options = []
197
+
198
+ @property
199
+ def value(self):
200
+ return self.panel.value
201
+
202
+ def set_selection(self, selection):
203
+ self.panel.value = [selection]
204
+
205
+
206
+ class FileSelector(SigSlot):
207
+ """Panel-based graphical file selector widget
208
+
209
+ Instances of this widget are interactive and can be displayed in jupyter by having
210
+ them as the output of a cell, or in a separate browser tab using ``.show()``.
211
+ """
212
+
213
+ signals = [
214
+ "protocol_changed",
215
+ "selection_changed",
216
+ "directory_entered",
217
+ "home_clicked",
218
+ "up_clicked",
219
+ "go_clicked",
220
+ "filters_changed",
221
+ ]
222
+ slots = ["set_filters", "go_home"]
223
+
224
+ def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
225
+ """
226
+
227
+ Parameters
228
+ ----------
229
+ url : str (optional)
230
+ Initial value of the URL to populate the dialog; should include protocol
231
+ filters : list(str) (optional)
232
+ File endings to include in the listings. If not included, all files are
233
+ allowed. Does not affect directories.
234
+ If given, the endings will appear as checkboxes in the interface
235
+ ignore : list(str) (optional)
236
+ Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
237
+ hidden files on posix
238
+ kwargs : dict (optional)
239
+ To pass to file system instance
240
+ """
241
+ if url:
242
+ self.init_protocol, url = split_protocol(url)
243
+ else:
244
+ self.init_protocol, url = "file", os.getcwd()
245
+ self.init_url = url
246
+ self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
247
+ self.filters = filters
248
+ self.ignore = [re.compile(i) for i in ignore or []]
249
+ self._fs = None
250
+ super().__init__()
251
+
252
+ def _setup(self):
253
+ self.url = pn.widgets.TextInput(
254
+ name="url",
255
+ value=self.init_url,
256
+ align="end",
257
+ sizing_mode="stretch_width",
258
+ width_policy="max",
259
+ )
260
+ self.protocol = pn.widgets.Select(
261
+ options=sorted(known_implementations),
262
+ value=self.init_protocol,
263
+ name="protocol",
264
+ align="center",
265
+ )
266
+ self.kwargs = pn.widgets.TextInput(
267
+ name="kwargs", value=self.init_kwargs, align="center"
268
+ )
269
+ self.go = pn.widgets.Button(name="⇨", align="end", width=45)
270
+ self.main = SingleSelect(size=10)
271
+ self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
272
+ self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
273
+
274
+ self._register(self.protocol, "protocol_changed", auto=True)
275
+ self._register(self.go, "go_clicked", "clicks", auto=True)
276
+ self._register(self.up, "up_clicked", "clicks", auto=True)
277
+ self._register(self.home, "home_clicked", "clicks", auto=True)
278
+ self._register(None, "selection_changed")
279
+ self.main.connect("selected", self.selection_changed)
280
+ self._register(None, "directory_entered")
281
+ self.prev_protocol = self.protocol.value
282
+ self.prev_kwargs = self.storage_options
283
+
284
+ self.filter_sel = pn.widgets.CheckBoxGroup(
285
+ value=[], options=[], inline=False, align="end", width_policy="min"
286
+ )
287
+ self._register(self.filter_sel, "filters_changed", auto=True)
288
+
289
+ self.panel = pn.Column(
290
+ pn.Row(self.protocol, self.kwargs),
291
+ pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
292
+ self.main.panel,
293
+ )
294
+ self.set_filters(self.filters)
295
+ self.go_clicked()
296
+
297
+ def set_filters(self, filters=None):
298
+ self.filters = filters
299
+ if filters:
300
+ self.filter_sel.options = filters
301
+ self.filter_sel.value = filters
302
+ else:
303
+ self.filter_sel.options = []
304
+ self.filter_sel.value = []
305
+
306
+ @property
307
+ def storage_options(self):
308
+ """Value of the kwargs box as a dictionary"""
309
+ return ast.literal_eval(self.kwargs.value) or {}
310
+
311
+ @property
312
+ def fs(self):
313
+ """Current filesystem instance"""
314
+ if self._fs is None:
315
+ cls = get_filesystem_class(self.protocol.value)
316
+ self._fs = cls(**self.storage_options)
317
+ return self._fs
318
+
319
+ @property
320
+ def urlpath(self):
321
+ """URL of currently selected item"""
322
+ return (
323
+ (f"{self.protocol.value}://{self.main.value[0]}")
324
+ if self.main.value
325
+ else None
326
+ )
327
+
328
+ def open_file(self, mode="rb", compression=None, encoding=None):
329
+ """Create OpenFile instance for the currently selected item
330
+
331
+ For example, in a notebook you might do something like
332
+
333
+ .. code-block::
334
+
335
+ [ ]: sel = FileSelector(); sel
336
+
337
+ # user selects their file
338
+
339
+ [ ]: with sel.open_file('rb') as f:
340
+ ... out = f.read()
341
+
342
+ Parameters
343
+ ----------
344
+ mode: str (optional)
345
+ Open mode for the file.
346
+ compression: str (optional)
347
+ If given, interact with the file as compressed. Set to 'infer' to guess
348
+ compression from the file ending
349
+ encoding: str (optional)
350
+ If using text mode, use this encoding; defaults to UTF8.
351
+ """
352
+ if self.urlpath is None:
353
+ raise ValueError("No file selected")
354
+ return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
355
+
356
+ def filters_changed(self, values):
357
+ self.filters = values
358
+ self.go_clicked()
359
+
360
+ def selection_changed(self, *_):
361
+ if self.urlpath is None:
362
+ return
363
+ if self.fs.isdir(self.urlpath):
364
+ self.url.value = self.fs._strip_protocol(self.urlpath)
365
+ self.go_clicked()
366
+
367
+ def go_clicked(self, *_):
368
+ if (
369
+ self.prev_protocol != self.protocol.value
370
+ or self.prev_kwargs != self.storage_options
371
+ ):
372
+ self._fs = None # causes fs to be recreated
373
+ self.prev_protocol = self.protocol.value
374
+ self.prev_kwargs = self.storage_options
375
+ listing = sorted(
376
+ self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
377
+ )
378
+ listing = [
379
+ l
380
+ for l in listing
381
+ if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
382
+ ]
383
+ folders = {
384
+ "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
385
+ for o in listing
386
+ if o["type"] == "directory"
387
+ }
388
+ files = {
389
+ "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
390
+ for o in listing
391
+ if o["type"] == "file"
392
+ }
393
+ if self.filters:
394
+ files = {
395
+ k: v
396
+ for k, v in files.items()
397
+ if any(v.endswith(ext) for ext in self.filters)
398
+ }
399
+ self.main.set_options(dict(**folders, **files))
400
+
401
+ def protocol_changed(self, *_):
402
+ self._fs = None
403
+ self.main.options = []
404
+ self.url.value = ""
405
+
406
+ def home_clicked(self, *_):
407
+ self.protocol.value = self.init_protocol
408
+ self.kwargs.value = self.init_kwargs
409
+ self.url.value = self.init_url
410
+ self.go_clicked()
411
+
412
+ def up_clicked(self, *_):
413
+ self.url.value = self.fs._parent(self.url.value)
414
+ self.go_clicked()
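A sketch of driving the FileSelector widget; it needs the optional panel dependency and is intended to be displayed in a notebook cell, and the filter endings below are just examples.

from fsspec.gui import FileSelector

sel = FileSelector(url="file:///tmp", filters=[".csv", ".json"])
sel   # in Jupyter, displaying the object renders the widget

# after a file has been picked in the UI:
# print(sel.urlpath)              # e.g. "file:///tmp/data.csv"
# with sel.open_file("rb") as f:
#     head = f.read(100)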
venv/lib/python3.10/site-packages/fsspec/implementations/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc ADDED
Binary file (3.27 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc ADDED
Binary file (7.56 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc ADDED
Binary file (29 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/data.cpython-310.pyc ADDED
Binary file (2.29 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc ADDED
Binary file (13.6 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc ADDED
Binary file (3.85 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc ADDED
Binary file (7.95 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc ADDED
Binary file (37.1 kB).
venv/lib/python3.10/site-packages/fsspec/implementations/arrow.py ADDED
@@ -0,0 +1,306 @@
1
+ import errno
2
+ import io
3
+ import os
4
+ import secrets
5
+ import shutil
6
+ from contextlib import suppress
7
+ from functools import cached_property, wraps
8
+ from urllib.parse import parse_qs
9
+
10
+ from fsspec.spec import AbstractFileSystem
11
+ from fsspec.utils import (
12
+ get_package_version_without_import,
13
+ infer_storage_options,
14
+ mirror_from,
15
+ tokenize,
16
+ )
17
+
18
+
19
+ def wrap_exceptions(func):
20
+ @wraps(func)
21
+ def wrapper(*args, **kwargs):
22
+ try:
23
+ return func(*args, **kwargs)
24
+ except OSError as exception:
25
+ if not exception.args:
26
+ raise
27
+
28
+ message, *args = exception.args
29
+ if isinstance(message, str) and "does not exist" in message:
30
+ raise FileNotFoundError(errno.ENOENT, message) from exception
31
+ else:
32
+ raise
33
+
34
+ return wrapper
35
+
36
+
37
+ PYARROW_VERSION = None
38
+
39
+
40
+ class ArrowFSWrapper(AbstractFileSystem):
41
+ """FSSpec-compatible wrapper of pyarrow.fs.FileSystem.
42
+
43
+ Parameters
44
+ ----------
45
+ fs : pyarrow.fs.FileSystem
46
+
47
+ """
48
+
49
+ root_marker = "/"
50
+
51
+ def __init__(self, fs, **kwargs):
52
+ global PYARROW_VERSION
53
+ PYARROW_VERSION = get_package_version_without_import("pyarrow")
54
+ self.fs = fs
55
+ super().__init__(**kwargs)
56
+
57
+ @property
58
+ def protocol(self):
59
+ return self.fs.type_name
60
+
61
+ @cached_property
62
+ def fsid(self):
63
+ return "hdfs_" + tokenize(self.fs.host, self.fs.port)
64
+
65
+ @classmethod
66
+ def _strip_protocol(cls, path):
67
+ ops = infer_storage_options(path)
68
+ path = ops["path"]
69
+ if path.startswith("//"):
70
+ # special case for "hdfs://path" (without the triple slash)
71
+ path = path[1:]
72
+ return path
73
+
74
+ def ls(self, path, detail=False, **kwargs):
75
+ path = self._strip_protocol(path)
76
+ from pyarrow.fs import FileSelector
77
+
78
+ entries = [
79
+ self._make_entry(entry)
80
+ for entry in self.fs.get_file_info(FileSelector(path))
81
+ ]
82
+ if detail:
83
+ return entries
84
+ else:
85
+ return [entry["name"] for entry in entries]
86
+
87
+ def info(self, path, **kwargs):
88
+ path = self._strip_protocol(path)
89
+ [info] = self.fs.get_file_info([path])
90
+ return self._make_entry(info)
91
+
92
+ def exists(self, path):
93
+ path = self._strip_protocol(path)
94
+ try:
95
+ self.info(path)
96
+ except FileNotFoundError:
97
+ return False
98
+ else:
99
+ return True
100
+
101
+ def _make_entry(self, info):
102
+ from pyarrow.fs import FileType
103
+
104
+ if info.type is FileType.Directory:
105
+ kind = "directory"
106
+ elif info.type is FileType.File:
107
+ kind = "file"
108
+ elif info.type is FileType.NotFound:
109
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
110
+ else:
111
+ kind = "other"
112
+
113
+ return {
114
+ "name": info.path,
115
+ "size": info.size,
116
+ "type": kind,
117
+ "mtime": info.mtime,
118
+ }
119
+
120
+ @wrap_exceptions
121
+ def cp_file(self, path1, path2, **kwargs):
122
+ path1 = self._strip_protocol(path1).rstrip("/")
123
+ path2 = self._strip_protocol(path2).rstrip("/")
124
+
125
+ with self._open(path1, "rb") as lstream:
126
+ tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
127
+ try:
128
+ with self.open(tmp_fname, "wb") as rstream:
129
+ shutil.copyfileobj(lstream, rstream)
130
+ self.fs.move(tmp_fname, path2)
131
+ except BaseException: # noqa
132
+ with suppress(FileNotFoundError):
133
+ self.fs.delete_file(tmp_fname)
134
+ raise
135
+
136
+ @wrap_exceptions
137
+ def mv(self, path1, path2, **kwargs):
138
+ path1 = self._strip_protocol(path1).rstrip("/")
139
+ path2 = self._strip_protocol(path2).rstrip("/")
140
+ self.fs.move(path1, path2)
141
+
142
+ mv_file = mv
143
+
144
+ @wrap_exceptions
145
+ def rm_file(self, path):
146
+ path = self._strip_protocol(path)
147
+ self.fs.delete_file(path)
148
+
149
+ @wrap_exceptions
150
+ def rm(self, path, recursive=False, maxdepth=None):
151
+ path = self._strip_protocol(path).rstrip("/")
152
+ if self.isdir(path):
153
+ if recursive:
154
+ self.fs.delete_dir(path)
155
+ else:
156
+ raise ValueError("Can't delete directories without recursive=False")
157
+ else:
158
+ self.fs.delete_file(path)
159
+
160
+ @wrap_exceptions
161
+ def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
162
+ if mode == "rb":
163
+ if seekable:
164
+ method = self.fs.open_input_file
165
+ else:
166
+ method = self.fs.open_input_stream
167
+ elif mode == "wb":
168
+ method = self.fs.open_output_stream
169
+ elif mode == "ab":
170
+ method = self.fs.open_append_stream
171
+ else:
172
+ raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")
173
+
174
+ _kwargs = {}
175
+ if mode != "rb" or not seekable:
176
+ if int(PYARROW_VERSION.split(".")[0]) >= 4:
177
+ # disable compression auto-detection
178
+ _kwargs["compression"] = None
179
+ stream = method(path, **_kwargs)
180
+
181
+ return ArrowFile(self, stream, path, mode, block_size, **kwargs)
182
+
183
+ @wrap_exceptions
184
+ def mkdir(self, path, create_parents=True, **kwargs):
185
+ path = self._strip_protocol(path)
186
+ if create_parents:
187
+ self.makedirs(path, exist_ok=True)
188
+ else:
189
+ self.fs.create_dir(path, recursive=False)
190
+
191
+ @wrap_exceptions
192
+ def makedirs(self, path, exist_ok=False):
193
+ path = self._strip_protocol(path)
194
+ self.fs.create_dir(path, recursive=True)
195
+
196
+ @wrap_exceptions
197
+ def rmdir(self, path):
198
+ path = self._strip_protocol(path)
199
+ self.fs.delete_dir(path)
200
+
201
+ @wrap_exceptions
202
+ def modified(self, path):
203
+ path = self._strip_protocol(path)
204
+ return self.fs.get_file_info(path).mtime
205
+
206
+ def cat_file(self, path, start=None, end=None, **kwargs):
207
+ kwargs["seekable"] = start not in [None, 0]
208
+ return super().cat_file(path, start=None, end=None, **kwargs)
209
+
210
+ def get_file(self, rpath, lpath, **kwargs):
211
+ kwargs["seekable"] = False
212
+ super().get_file(rpath, lpath, **kwargs)
213
+
214
+
215
+ @mirror_from(
216
+ "stream",
217
+ [
218
+ "read",
219
+ "seek",
220
+ "tell",
221
+ "write",
222
+ "readable",
223
+ "writable",
224
+ "close",
225
+ "size",
226
+ "seekable",
227
+ ],
228
+ )
229
+ class ArrowFile(io.IOBase):
230
+ def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
231
+ self.path = path
232
+ self.mode = mode
233
+
234
+ self.fs = fs
235
+ self.stream = stream
236
+
237
+ self.blocksize = self.block_size = block_size
238
+ self.kwargs = kwargs
239
+
240
+ def __enter__(self):
241
+ return self
242
+
243
+ def __exit__(self, *args):
244
+ return self.close()
245
+
246
+
247
+ class HadoopFileSystem(ArrowFSWrapper):
248
+ """A wrapper on top of the pyarrow.fs.HadoopFileSystem
249
+ to connect it's interface with fsspec"""
250
+
251
+ protocol = "hdfs"
252
+
253
+ def __init__(
254
+ self,
255
+ host="default",
256
+ port=0,
257
+ user=None,
258
+ kerb_ticket=None,
259
+ replication=3,
260
+ extra_conf=None,
261
+ **kwargs,
262
+ ):
263
+ """
264
+
265
+ Parameters
266
+ ----------
267
+ host: str
268
+ Hostname, IP or "default" to try to read from Hadoop config
269
+ port: int
270
+ Port to connect on, or default from Hadoop config if 0
271
+ user: str or None
272
+ If given, connect as this username
273
+ kerb_ticket: str or None
274
+ If given, use this ticket for authentication
275
+ replication: int
276
+ set replication factor of file for write operations. default value is 3.
277
+ extra_conf: None or dict
278
+ Passed on to HadoopFileSystem
279
+ """
280
+ from pyarrow.fs import HadoopFileSystem
281
+
282
+ fs = HadoopFileSystem(
283
+ host=host,
284
+ port=port,
285
+ user=user,
286
+ kerb_ticket=kerb_ticket,
287
+ replication=replication,
288
+ extra_conf=extra_conf,
289
+ )
290
+ super().__init__(fs=fs, **kwargs)
291
+
292
+ @staticmethod
293
+ def _get_kwargs_from_urls(path):
294
+ ops = infer_storage_options(path)
295
+ out = {}
296
+ if ops.get("host", None):
297
+ out["host"] = ops["host"]
298
+ if ops.get("username", None):
299
+ out["user"] = ops["username"]
300
+ if ops.get("port", None):
301
+ out["port"] = ops["port"]
302
+ if ops.get("url_query", None):
303
+ queries = parse_qs(ops["url_query"])
304
+ if queries.get("replication", None):
305
+ out["replication"] = int(queries["replication"][0])
306
+ return out
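A minimal sketch of wrapping a pyarrow filesystem with ArrowFSWrapper; it uses pyarrow's LocalFileSystem so it runs without a Hadoop cluster, and the temporary file name is arbitrary.

import tempfile
from pyarrow.fs import LocalFileSystem
from fsspec.implementations.arrow import ArrowFSWrapper

fs = ArrowFSWrapper(LocalFileSystem())

d = tempfile.mkdtemp()
with fs.open(d + "/hello.txt", "wb") as f:
    f.write(b"hi from arrow\n")

print(fs.ls(d))                        # fsspec-style listing backed by pyarrow
print(fs.cat_file(d + "/hello.txt"))   # b"hi from arrow\n"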
venv/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py ADDED
@@ -0,0 +1,76 @@
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ import hashlib
5
+
6
+ from fsspec.implementations.local import make_path_posix
7
+
8
+
9
+ class AbstractCacheMapper(abc.ABC):
10
+ """Abstract super-class for mappers from remote URLs to local cached
11
+ basenames.
12
+ """
13
+
14
+ @abc.abstractmethod
15
+ def __call__(self, path: str) -> str:
16
+ ...
17
+
18
+ def __eq__(self, other: object) -> bool:
19
+ # Identity only depends on class. When derived classes have attributes
20
+ # they will need to be included.
21
+ return isinstance(other, type(self))
22
+
23
+ def __hash__(self) -> int:
24
+ # Identity only depends on class. When derived classes have attributes
25
+ # they will need to be included.
26
+ return hash(type(self))
27
+
28
+
29
+ class BasenameCacheMapper(AbstractCacheMapper):
30
+ """Cache mapper that uses the basename of the remote URL and a fixed number
31
+ of directory levels above this.
32
+
33
+ The default is zero directory levels, meaning different paths with the same
34
+ basename will have the same cached basename.
35
+ """
36
+
37
+ def __init__(self, directory_levels: int = 0):
38
+ if directory_levels < 0:
39
+ raise ValueError(
40
+ "BasenameCacheMapper requires zero or positive directory_levels"
41
+ )
42
+ self.directory_levels = directory_levels
43
+
44
+ # Separator for directories when encoded as strings.
45
+ self._separator = "_@_"
46
+
47
+ def __call__(self, path: str) -> str:
48
+ path = make_path_posix(path)
49
+ prefix, *bits = path.rsplit("/", self.directory_levels + 1)
50
+ if bits:
51
+ return self._separator.join(bits)
52
+ else:
53
+ return prefix # No separator found, simple filename
54
+
55
+ def __eq__(self, other: object) -> bool:
56
+ return super().__eq__(other) and self.directory_levels == other.directory_levels
57
+
58
+ def __hash__(self) -> int:
59
+ return super().__hash__() ^ hash(self.directory_levels)
60
+
61
+
62
+ class HashCacheMapper(AbstractCacheMapper):
63
+ """Cache mapper that uses a hash of the remote URL."""
64
+
65
+ def __call__(self, path: str) -> str:
66
+ return hashlib.sha256(path.encode()).hexdigest()
67
+
68
+
69
+ def create_cache_mapper(same_names: bool) -> AbstractCacheMapper:
70
+ """Factory method to create cache mapper for backward compatibility with
71
+ ``CachingFileSystem`` constructor using ``same_names`` kwarg.
72
+ """
73
+ if same_names:
74
+ return BasenameCacheMapper()
75
+ else:
76
+ return HashCacheMapper()
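A quick illustration of the two mappers defined above; the path is arbitrary.

from fsspec.implementations.cache_mapper import (
    BasenameCacheMapper,
    HashCacheMapper,
    create_cache_mapper,
)

path = "/project/data/file.parquet"

print(BasenameCacheMapper()(path))                    # file.parquet
print(BasenameCacheMapper(directory_levels=1)(path))  # data_@_file.parquet
print(HashCacheMapper()(path))                        # sha256 hex digest of the path
print(create_cache_mapper(same_names=True))           # a BasenameCacheMapper instance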
venv/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,232 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pickle
5
+ import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from fsspec.utils import atomic_write
9
+
10
+ try:
11
+ import ujson as json
12
+ except ImportError:
13
+ if not TYPE_CHECKING:
14
+ import json
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Any, Dict, Iterator, Literal
18
+
19
+ from typing_extensions import TypeAlias
20
+
21
+ from .cached import CachingFileSystem
22
+
23
+ Detail: TypeAlias = Dict[str, Any]
24
+
25
+
26
+ class CacheMetadata:
27
+ """Cache metadata.
28
+
29
+ All reading and writing of cache metadata is performed by this class,
30
+ accessing the cached files and blocks is not.
31
+
32
+ Metadata is stored in a single file per storage directory in JSON format.
33
+ For backward compatibility, also reads metadata stored in pickle format
34
+ which is converted to JSON when next saved.
35
+ """
36
+
37
+ def __init__(self, storage: list[str]):
38
+ """
39
+
40
+ Parameters
41
+ ----------
42
+ storage: list[str]
43
+ Directories containing cached files; there must be at least one. Metadata
44
+ is stored in the last of these directories by convention.
45
+ """
46
+ if not storage:
47
+ raise ValueError("CacheMetadata expects at least one storage location")
48
+
49
+ self._storage = storage
50
+ self.cached_files: list[Detail] = [{}]
51
+
52
+ # Private attribute to force saving of metadata in pickle format rather than
53
+ # JSON for use in tests to confirm can read both pickle and JSON formats.
54
+ self._force_save_pickle = False
55
+
56
+ def _load(self, fn: str) -> Detail:
57
+ """Low-level function to load metadata from specific file"""
58
+ try:
59
+ with open(fn, "r") as f:
60
+ return json.load(f)
61
+ except ValueError:
62
+ with open(fn, "rb") as f:
63
+ return pickle.load(f)
64
+
65
+ def _save(self, metadata_to_save: Detail, fn: str) -> None:
66
+ """Low-level function to save metadata to specific file"""
67
+ if self._force_save_pickle:
68
+ with atomic_write(fn) as f:
69
+ pickle.dump(metadata_to_save, f)
70
+ else:
71
+ with atomic_write(fn, mode="w") as f:
72
+ json.dump(metadata_to_save, f)
73
+
74
+ def _scan_locations(
75
+ self, writable_only: bool = False
76
+ ) -> Iterator[tuple[str, str, bool]]:
77
+ """Yield locations (filenames) where metadata is stored, and whether
78
+ writable or not.
79
+
80
+ Parameters
81
+ ----------
82
+ writable_only: bool
83
+ Set to True to only yield writable locations.
84
+
85
+ Returns
86
+ -------
87
+ Yields (str, str, bool)
88
+ """
89
+ n = len(self._storage)
90
+ for i, storage in enumerate(self._storage):
91
+ writable = i == n - 1
92
+ if writable_only and not writable:
93
+ continue
94
+ yield os.path.join(storage, "cache"), storage, writable
95
+
96
+ def check_file(
97
+ self, path: str, cfs: CachingFileSystem | None
98
+ ) -> Literal[False] | tuple[Detail, str]:
99
+ """If path is in cache return its details, otherwise return ``False``.
100
+
101
+ If the optional CachingFileSystem is specified then it is used to
102
+ perform extra checks to reject possible matches, such as if they are
103
+ too old.
104
+ """
105
+ for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
106
+ if path not in cache:
107
+ continue
108
+ detail = cache[path].copy()
109
+
110
+ if cfs is not None:
111
+ if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
112
+ # Wrong file as determined by hash of file properties
113
+ continue
114
+ if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
115
+ # Cached file has expired
116
+ continue
117
+
118
+ fn = os.path.join(base, detail["fn"])
119
+ if os.path.exists(fn):
120
+ return detail, fn
121
+ return False
122
+
123
+ def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
124
+ """Remove expired metadata from the cache.
125
+
126
+ Returns names of files corresponding to expired metadata and a boolean
127
+ flag indicating whether the writable cache is empty. Caller is
128
+ responsible for deleting the expired files.
129
+ """
130
+ expired_files = []
131
+ for path, detail in self.cached_files[-1].copy().items():
132
+ if time.time() - detail["time"] > expiry_time:
133
+ fn = detail.get("fn", "")
134
+ if not fn:
135
+ raise RuntimeError(
136
+ f"Cache metadata does not contain 'fn' for {path}"
137
+ )
138
+ fn = os.path.join(self._storage[-1], fn)
139
+ expired_files.append(fn)
140
+ self.cached_files[-1].pop(path)
141
+
142
+ if self.cached_files[-1]:
143
+ cache_path = os.path.join(self._storage[-1], "cache")
144
+ self._save(self.cached_files[-1], cache_path)
145
+
146
+ writable_cache_empty = not self.cached_files[-1]
147
+ return expired_files, writable_cache_empty
148
+
149
+ def load(self) -> None:
150
+ """Load all metadata from disk and store in ``self.cached_files``"""
151
+ cached_files = []
152
+ for fn, _, _ in self._scan_locations():
153
+ if os.path.exists(fn):
154
+ # TODO: consolidate blocks here
155
+ loaded_cached_files = self._load(fn)
156
+ for c in loaded_cached_files.values():
157
+ if isinstance(c["blocks"], list):
158
+ c["blocks"] = set(c["blocks"])
159
+ cached_files.append(loaded_cached_files)
160
+ else:
161
+ cached_files.append({})
162
+ self.cached_files = cached_files or [{}]
163
+
164
+ def on_close_cached_file(self, f: Any, path: str) -> None:
165
+ """Perform side-effect actions on closing a cached file.
166
+
167
+ The actual closing of the file is the responsibility of the caller.
168
+ """
169
+ # File must be writable, so it is in self.cached_files[-1]
170
+ c = self.cached_files[-1][path]
171
+ if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
172
+ c["blocks"] = True
173
+
174
+ def pop_file(self, path: str) -> str | None:
175
+ """Remove metadata of cached file.
176
+
177
+ If path is in the cache, return the filename of the cached file,
178
+ otherwise return ``None``. Caller is responsible for deleting the
179
+ cached file.
180
+ """
181
+ details = self.check_file(path, None)
182
+ if not details:
183
+ return None
184
+ _, fn = details
185
+ if fn.startswith(self._storage[-1]):
186
+ self.cached_files[-1].pop(path)
187
+ self.save()
188
+ else:
189
+ raise PermissionError(
190
+ "Can only delete cached file in last, writable cache location"
191
+ )
192
+ return fn
193
+
194
+ def save(self) -> None:
195
+ """Save metadata to disk"""
196
+ for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
197
+ if not writable:
198
+ continue
199
+
200
+ if os.path.exists(fn):
201
+ cached_files = self._load(fn)
202
+ for k, c in cached_files.items():
203
+ if k in cache:
204
+ if c["blocks"] is True or cache[k]["blocks"] is True:
205
+ c["blocks"] = True
206
+ else:
207
+ # self.cached_files[*][*]["blocks"] must continue to
208
+ # point to the same set object so that updates
209
+ # performed by MMapCache are propagated back to
210
+ # self.cached_files.
211
+ blocks = cache[k]["blocks"]
212
+ blocks.update(c["blocks"])
213
+ c["blocks"] = blocks
214
+ c["time"] = max(c["time"], cache[k]["time"])
215
+ c["uid"] = cache[k]["uid"]
216
+
217
+ # Files can be added to cache after it was written once
218
+ for k, c in cache.items():
219
+ if k not in cached_files:
220
+ cached_files[k] = c
221
+ else:
222
+ cached_files = cache
223
+ cache = {k: v.copy() for k, v in cached_files.items()}
224
+ for c in cache.values():
225
+ if isinstance(c["blocks"], set):
226
+ c["blocks"] = list(c["blocks"])
227
+ self._save(cache, fn)
228
+ self.cached_files[-1] = cached_files
229
+
230
+ def update_file(self, path: str, detail: Detail) -> None:
231
+ """Update metadata for specific file in memory, do not save"""
232
+ self.cached_files[-1][path] = detail
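To make the flow above concrete, here is a minimal sketch of driving ``CacheMetadata`` directly, outside of ``CachingFileSystem``. The storage directory, remote path, cached filename and ``uid`` value are hypothetical placeholders; the ``detail`` dictionary simply mirrors the fields that the caching filesystems below write.

import os
import tempfile
import time

from fsspec.implementations.cache_metadata import CacheMetadata

storage = [tempfile.mkdtemp()]            # single writable cache location (placeholder)
meta = CacheMetadata(storage)
meta.load()                               # no "cache" file yet -> starts empty

# Record a hypothetical, fully cached file the same way CachingFileSystem does
detail = {
    "original": "memory://data.bin",      # placeholder remote path
    "fn": "abc123",                       # cached filename inside storage[-1]
    "blocks": True,                       # True means the whole file is cached
    "time": time.time(),
    "uid": "fake-ukey",                   # placeholder for fs.ukey(path)
}
meta.update_file("memory://data.bin", detail)
meta.save()                               # writes JSON metadata to storage[-1]/cache

# check_file only reports a hit if the cached file actually exists on disk
open(os.path.join(storage[-1], "abc123"), "wb").close()
print(meta.check_file("memory://data.bin", None))   # -> (detail, local path)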
venv/lib/python3.10/site-packages/fsspec/implementations/cached.py ADDED
@@ -0,0 +1,939 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import tempfile
7
+ import time
8
+ import weakref
9
+ from shutil import rmtree
10
+ from typing import TYPE_CHECKING, Any, Callable, ClassVar
11
+
12
+ from fsspec import AbstractFileSystem, filesystem
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.compression import compr
15
+ from fsspec.core import BaseCache, MMapCache
16
+ from fsspec.exceptions import BlocksizeMismatchError
17
+ from fsspec.implementations.cache_mapper import create_cache_mapper
18
+ from fsspec.implementations.cache_metadata import CacheMetadata
19
+ from fsspec.spec import AbstractBufferedFile
20
+ from fsspec.transaction import Transaction
21
+ from fsspec.utils import infer_compression
22
+
23
+ if TYPE_CHECKING:
24
+ from fsspec.implementations.cache_mapper import AbstractCacheMapper
25
+
26
+ logger = logging.getLogger("fsspec.cached")
27
+
28
+
29
+ class WriteCachedTransaction(Transaction):
30
+ def complete(self, commit=True):
31
+ rpaths = [f.path for f in self.files]
32
+ lpaths = [f.fn for f in self.files]
33
+ if commit:
34
+ self.fs.put(lpaths, rpaths)
35
+ self.files.clear()
36
+ self.fs._intrans = False
37
+ self.fs._transaction = None
38
+ self.fs = None # break cycle
39
+
40
+
41
+ class CachingFileSystem(AbstractFileSystem):
42
+ """Locally caching filesystem, layer over any other FS
43
+
44
+ This class implements chunk-wise local storage of remote files, for quick
45
+ access after the initial download. The files are stored in a given
46
+ directory with hashes of URLs for the filenames. If no directory is given,
47
+ a temporary one is used, which should be cleaned up by the OS after the
48
+ process ends. The files themselves are sparse (as implemented in
49
+ :class:`~fsspec.caching.MMapCache`), so only the data which is accessed
50
+ takes up space.
51
+
52
+ Restrictions:
53
+
54
+ - the block-size must be the same for each access of a given file, unless
55
+ all blocks of the file have already been read
56
+ - caching can only be applied to file-systems which produce files
57
+ derived from fsspec.spec.AbstractBufferedFile; LocalFileSystem is also
58
+ allowed, for testing
59
+ """
60
+
61
+ protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached")
62
+
63
+ def __init__(
64
+ self,
65
+ target_protocol=None,
66
+ cache_storage="TMP",
67
+ cache_check=10,
68
+ check_files=False,
69
+ expiry_time=604800,
70
+ target_options=None,
71
+ fs=None,
72
+ same_names: bool | None = None,
73
+ compression=None,
74
+ cache_mapper: AbstractCacheMapper | None = None,
75
+ **kwargs,
76
+ ):
77
+ """
78
+
79
+ Parameters
80
+ ----------
81
+ target_protocol: str (optional)
82
+ Target filesystem protocol. Provide either this or ``fs``.
83
+ cache_storage: str or list(str)
84
+ Location to store files. If "TMP", this is a temporary directory,
85
+ and will be cleaned up by the OS when this process ends (or later).
86
+ If a list, each location will be tried in the order given, but
87
+ only the last will be considered writable.
88
+ cache_check: int
89
+ Number of seconds between reload of cache metadata
90
+ check_files: bool
91
+ Whether to explicitly see if the UID of the remote file matches
92
+ the stored one before using. Warning: some file systems such as
93
+ HTTP cannot reliably give a unique hash of the contents of some
94
+ path, so be sure to set this option to False.
95
+ expiry_time: int
96
+ The time in seconds after which a local copy is considered useless.
97
+ Set to falsy to prevent expiry. The default is equivalent to one
98
+ week.
99
+ target_options: dict or None
100
+ Passed to the instantiation of the FS, if fs is None.
101
+ fs: filesystem instance
102
+ The target filesystem to run against. Provide this or ``protocol``.
103
+ same_names: bool (optional)
104
+ By default, target URLs are hashed using a ``HashCacheMapper`` so
105
+ that files from different backends with the same basename do not
106
+ conflict. If this argument is ``True``, a ``BasenameCacheMapper``
107
+ is used instead. Other cache mapper options are available by using
108
+ the ``cache_mapper`` keyword argument. Only one of this and
109
+ ``cache_mapper`` should be specified.
110
+ compression: str (optional)
111
+ To decompress on download. Can be 'infer' (guess from the URL name),
112
+ one of the entries in ``fsspec.compression.compr``, or None for no
113
+ decompression.
114
+ cache_mapper: AbstractCacheMapper (optional)
115
+ The object used to map from original filenames to cached filenames.
116
+ Only one of this and ``same_names`` should be specified.
117
+ """
118
+ super().__init__(**kwargs)
119
+ if fs is None and target_protocol is None:
120
+ raise ValueError(
121
+ "Please provide filesystem instance(fs) or target_protocol"
122
+ )
123
+ if not (fs is None) ^ (target_protocol is None):
124
+ raise ValueError(
125
+ "Both filesystems (fs) and target_protocol may not be both given."
126
+ )
127
+ if cache_storage == "TMP":
128
+ tempdir = tempfile.mkdtemp()
129
+ storage = [tempdir]
130
+ weakref.finalize(self, self._remove_tempdir, tempdir)
131
+ else:
132
+ if isinstance(cache_storage, str):
133
+ storage = [cache_storage]
134
+ else:
135
+ storage = cache_storage
136
+ os.makedirs(storage[-1], exist_ok=True)
137
+ self.storage = storage
138
+ self.kwargs = target_options or {}
139
+ self.cache_check = cache_check
140
+ self.check_files = check_files
141
+ self.expiry = expiry_time
142
+ self.compression = compression
143
+
144
+ # Size of cache in bytes. If None then the size is unknown and will be
145
+ # recalculated the next time cache_size() is called. On writes to the
146
+ # cache this is reset to None.
147
+ self._cache_size = None
148
+
149
+ if same_names is not None and cache_mapper is not None:
150
+ raise ValueError(
151
+ "Cannot specify both same_names and cache_mapper in "
152
+ "CachingFileSystem.__init__"
153
+ )
154
+ if cache_mapper is not None:
155
+ self._mapper = cache_mapper
156
+ else:
157
+ self._mapper = create_cache_mapper(
158
+ same_names if same_names is not None else False
159
+ )
160
+
161
+ self.target_protocol = (
162
+ target_protocol
163
+ if isinstance(target_protocol, str)
164
+ else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0])
165
+ )
166
+ self._metadata = CacheMetadata(self.storage)
167
+ self.load_cache()
168
+ self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs)
169
+
170
+ def _strip_protocol(path):
171
+ # acts as a method, since each instance has a different target
172
+ return self.fs._strip_protocol(type(self)._strip_protocol(path))
173
+
174
+ self._strip_protocol: Callable = _strip_protocol
175
+
176
+ @staticmethod
177
+ def _remove_tempdir(tempdir):
178
+ try:
179
+ rmtree(tempdir)
180
+ except Exception:
181
+ pass
182
+
183
+ def _mkcache(self):
184
+ os.makedirs(self.storage[-1], exist_ok=True)
185
+
186
+ def cache_size(self):
187
+ """Return size of cache in bytes.
188
+
189
+ If more than one cache directory is in use, only the size of the last
190
+ one (the writable cache directory) is returned.
191
+ """
192
+ if self._cache_size is None:
193
+ cache_dir = self.storage[-1]
194
+ self._cache_size = filesystem("file").du(cache_dir, withdirs=True)
195
+ return self._cache_size
196
+
197
+ def load_cache(self):
198
+ """Read set of stored blocks from file"""
199
+ self._metadata.load()
200
+ self._mkcache()
201
+ self.last_cache = time.time()
202
+
203
+ def save_cache(self):
204
+ """Save set of stored blocks from file"""
205
+ self._mkcache()
206
+ self._metadata.save()
207
+ self.last_cache = time.time()
208
+ self._cache_size = None
209
+
210
+ def _check_cache(self):
211
+ """Reload caches if time elapsed or any disappeared"""
212
+ self._mkcache()
213
+ if not self.cache_check:
214
+ # explicitly told not to bother checking
215
+ return
216
+ timecond = time.time() - self.last_cache > self.cache_check
217
+ existcond = all(os.path.exists(storage) for storage in self.storage)
218
+ if timecond or not existcond:
219
+ self.load_cache()
220
+
221
+ def _check_file(self, path):
222
+ """Is path in cache and still valid"""
223
+ path = self._strip_protocol(path)
224
+ self._check_cache()
225
+ return self._metadata.check_file(path, self)
226
+
227
+ def clear_cache(self):
228
+ """Remove all files and metadata from the cache
229
+
230
+ In the case of multiple cache locations, this clears only the last one,
231
+ which is assumed to be the read/write one.
232
+ """
233
+ rmtree(self.storage[-1])
234
+ self.load_cache()
235
+ self._cache_size = None
236
+
237
+ def clear_expired_cache(self, expiry_time=None):
238
+ """Remove all expired files and metadata from the cache
239
+
240
+ In the case of multiple cache locations, this clears only the last one,
241
+ which is assumed to be the read/write one.
242
+
243
+ Parameters
244
+ ----------
245
+ expiry_time: int
246
+ The time in seconds after which a local copy is considered useless.
247
+ If not defined the default is equivalent to the attribute from the
248
+ file caching instantiation.
249
+ """
250
+
251
+ if not expiry_time:
252
+ expiry_time = self.expiry
253
+
254
+ self._check_cache()
255
+
256
+ expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time)
257
+ for fn in expired_files:
258
+ if os.path.exists(fn):
259
+ os.remove(fn)
260
+
261
+ if writable_cache_empty:
262
+ rmtree(self.storage[-1])
263
+ self.load_cache()
264
+
265
+ self._cache_size = None
266
+
267
+ def pop_from_cache(self, path):
268
+ """Remove cached version of given file
269
+
270
+ Deletes local copy of the given (remote) path. If it is found in a cache
271
+ location which is not the last, it is assumed to be read-only, and
272
+ raises PermissionError
273
+ """
274
+ path = self._strip_protocol(path)
275
+ fn = self._metadata.pop_file(path)
276
+ if fn is not None:
277
+ os.remove(fn)
278
+ self._cache_size = None
279
+
280
+ def _open(
281
+ self,
282
+ path,
283
+ mode="rb",
284
+ block_size=None,
285
+ autocommit=True,
286
+ cache_options=None,
287
+ **kwargs,
288
+ ):
289
+ """Wrap the target _open
290
+
291
+ If the whole file exists in the cache, just open it locally and
292
+ return that.
293
+
294
+ Otherwise, open the file on the target FS, and make it have a mmap
295
+ cache pointing to the location which we determine, in our cache.
296
+ The ``blocks`` instance is shared, so as the mmap cache instance
297
+ updates, so does the entry in our ``cached_files`` attribute.
298
+ We monkey-patch this file, so that when it closes, we call
299
+ ``close_and_update`` to save the state of the blocks.
300
+ """
301
+ path = self._strip_protocol(path)
302
+
303
+ path = self.fs._strip_protocol(path)
304
+ if "r" not in mode:
305
+ return self.fs._open(
306
+ path,
307
+ mode=mode,
308
+ block_size=block_size,
309
+ autocommit=autocommit,
310
+ cache_options=cache_options,
311
+ **kwargs,
312
+ )
313
+ detail = self._check_file(path)
314
+ if detail:
315
+ # file is in cache
316
+ detail, fn = detail
317
+ hash, blocks = detail["fn"], detail["blocks"]
318
+ if blocks is True:
319
+ # stored file is complete
320
+ logger.debug("Opening local copy of %s", path)
321
+ return open(fn, mode)
322
+ # TODO: action where partial file exists in read-only cache
323
+ logger.debug("Opening partially cached copy of %s", path)
324
+ else:
325
+ hash = self._mapper(path)
326
+ fn = os.path.join(self.storage[-1], hash)
327
+ blocks = set()
328
+ detail = {
329
+ "original": path,
330
+ "fn": hash,
331
+ "blocks": blocks,
332
+ "time": time.time(),
333
+ "uid": self.fs.ukey(path),
334
+ }
335
+ self._metadata.update_file(path, detail)
336
+ logger.debug("Creating local sparse file for %s", path)
337
+
338
+ # call target filesystems open
339
+ self._mkcache()
340
+ f = self.fs._open(
341
+ path,
342
+ mode=mode,
343
+ block_size=block_size,
344
+ autocommit=autocommit,
345
+ cache_options=cache_options,
346
+ cache_type="none",
347
+ **kwargs,
348
+ )
349
+ if self.compression:
350
+ comp = (
351
+ infer_compression(path)
352
+ if self.compression == "infer"
353
+ else self.compression
354
+ )
355
+ f = compr[comp](f, mode="rb")
356
+ if "blocksize" in detail:
357
+ if detail["blocksize"] != f.blocksize:
358
+ raise BlocksizeMismatchError(
359
+ f"Cached file must be reopened with same block"
360
+ f" size as original (old: {detail['blocksize']},"
361
+ f" new {f.blocksize})"
362
+ )
363
+ else:
364
+ detail["blocksize"] = f.blocksize
365
+ f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks)
366
+ close = f.close
367
+ f.close = lambda: self.close_and_update(f, close)
368
+ self.save_cache()
369
+ return f
370
+
371
+ def _parent(self, path):
372
+ return self.fs._parent(path)
373
+
374
+ def hash_name(self, path: str, *args: Any) -> str:
375
+ # Kept for backward compatibility with downstream libraries.
376
+ # Ignores extra arguments, previously same_name boolean.
377
+ return self._mapper(path)
378
+
379
+ def close_and_update(self, f, close):
380
+ """Called when a file is closing, so store the set of blocks"""
381
+ if f.closed:
382
+ return
383
+ path = self._strip_protocol(f.path)
384
+ self._metadata.on_close_cached_file(f, path)
385
+ try:
386
+ logger.debug("going to save")
387
+ self.save_cache()
388
+ logger.debug("saved")
389
+ except OSError:
390
+ logger.debug("Cache saving failed while closing file")
391
+ except NameError:
392
+ logger.debug("Cache save failed due to interpreter shutdown")
393
+ close()
394
+ f.closed = True
395
+
396
+ def ls(self, path, detail=True):
397
+ return self.fs.ls(path, detail)
398
+
399
+ def __getattribute__(self, item):
400
+ if item in {
401
+ "load_cache",
402
+ "_open",
403
+ "save_cache",
404
+ "close_and_update",
405
+ "__init__",
406
+ "__getattribute__",
407
+ "__reduce__",
408
+ "_make_local_details",
409
+ "open",
410
+ "cat",
411
+ "cat_file",
412
+ "cat_ranges",
413
+ "get",
414
+ "read_block",
415
+ "tail",
416
+ "head",
417
+ "info",
418
+ "ls",
419
+ "exists",
420
+ "isfile",
421
+ "isdir",
422
+ "_check_file",
423
+ "_check_cache",
424
+ "_mkcache",
425
+ "clear_cache",
426
+ "clear_expired_cache",
427
+ "pop_from_cache",
428
+ "_mkcache",
429
+ "local_file",
430
+ "_paths_from_path",
431
+ "get_mapper",
432
+ "open_many",
433
+ "commit_many",
434
+ "hash_name",
435
+ "__hash__",
436
+ "__eq__",
437
+ "to_json",
438
+ "cache_size",
439
+ "pipe_file",
440
+ "pipe",
441
+ "isdir",
442
+ "isfile",
443
+ "exists",
444
+ "start_transaction",
445
+ "end_transaction",
446
+ }:
447
+ # all the methods defined in this class. Note `open` here, since
448
+ # it calls `_open`, but is actually in superclass
449
+ return lambda *args, **kw: getattr(type(self), item).__get__(self)(
450
+ *args, **kw
451
+ )
452
+ if item in ["__reduce_ex__"]:
453
+ raise AttributeError
454
+ if item in ["transaction"]:
455
+ # property
456
+ return type(self).transaction.__get__(self)
457
+ if item in ["_cache", "transaction_type"]:
458
+ # class attributes
459
+ return getattr(type(self), item)
460
+ if item == "__class__":
461
+ return type(self)
462
+ d = object.__getattribute__(self, "__dict__")
463
+ fs = d.get("fs", None) # fs is not immediately defined
464
+ if item in d:
465
+ return d[item]
466
+ elif fs is not None:
467
+ if item in fs.__dict__:
468
+ # attribute of instance
469
+ return fs.__dict__[item]
470
+ # attribute belonging to the target filesystem
471
+ cls = type(fs)
472
+ m = getattr(cls, item)
473
+ if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and (
474
+ not hasattr(m, "__self__") or m.__self__ is None
475
+ ):
476
+ # instance method
477
+ return m.__get__(fs, cls)
478
+ return m # class method or attribute
479
+ else:
480
+ # attributes of the superclass, while target is being set up
481
+ return super().__getattribute__(item)
482
+
483
+ def __eq__(self, other):
484
+ """Test for equality."""
485
+ if self is other:
486
+ return True
487
+ if not isinstance(other, type(self)):
488
+ return False
489
+ return (
490
+ self.storage == other.storage
491
+ and self.kwargs == other.kwargs
492
+ and self.cache_check == other.cache_check
493
+ and self.check_files == other.check_files
494
+ and self.expiry == other.expiry
495
+ and self.compression == other.compression
496
+ and self._mapper == other._mapper
497
+ and self.target_protocol == other.target_protocol
498
+ )
499
+
500
+ def __hash__(self):
501
+ """Calculate hash."""
502
+ return (
503
+ hash(tuple(self.storage))
504
+ ^ hash(str(self.kwargs))
505
+ ^ hash(self.cache_check)
506
+ ^ hash(self.check_files)
507
+ ^ hash(self.expiry)
508
+ ^ hash(self.compression)
509
+ ^ hash(self._mapper)
510
+ ^ hash(self.target_protocol)
511
+ )
512
+
513
+ def to_json(self):
514
+ """Calculate JSON representation.
515
+
516
+ Not implemented yet for CachingFileSystem.
517
+ """
518
+ raise NotImplementedError(
519
+ "CachingFileSystem JSON representation not implemented"
520
+ )
521
+
522
+
523
+ class WholeFileCacheFileSystem(CachingFileSystem):
524
+ """Caches whole remote files on first access
525
+
526
+ This class is intended as a layer over any other file system, and
527
+ will make a local copy of each file accessed, so that all subsequent
528
+ reads are local. This is similar to ``CachingFileSystem``, but without
529
+ the block-wise functionality and so can work even when sparse files
530
+ are not allowed. See its docstring for definition of the init
531
+ arguments.
532
+
533
+ The class still needs access to the remote store for listing files,
534
+ and may refresh cached files.
535
+ """
536
+
537
+ protocol = "filecache"
538
+ local_file = True
539
+
540
+ def open_many(self, open_files, **kwargs):
541
+ paths = [of.path for of in open_files]
542
+ if "r" in open_files.mode:
543
+ self._mkcache()
544
+ else:
545
+ return [
546
+ LocalTempFile(
547
+ self.fs,
548
+ path,
549
+ mode=open_files.mode,
550
+ fn=os.path.join(self.storage[-1], self._mapper(path)),
551
+ **kwargs,
552
+ )
553
+ for path in paths
554
+ ]
555
+
556
+ if self.compression:
557
+ raise NotImplementedError
558
+ details = [self._check_file(sp) for sp in paths]
559
+ downpath = [p for p, d in zip(paths, details) if not d]
560
+ downfn0 = [
561
+ os.path.join(self.storage[-1], self._mapper(p))
562
+ for p, d in zip(paths, details)
563
+ ] # keep these path names for opening later
564
+ downfn = [fn for fn, d in zip(downfn0, details) if not d]
565
+ if downpath:
566
+ # skip if all files are already cached and up to date
567
+ self.fs.get(downpath, downfn)
568
+
569
+ # update metadata - only happens when downloads are successful
570
+ newdetail = [
571
+ {
572
+ "original": path,
573
+ "fn": self._mapper(path),
574
+ "blocks": True,
575
+ "time": time.time(),
576
+ "uid": self.fs.ukey(path),
577
+ }
578
+ for path in downpath
579
+ ]
580
+ for path, detail in zip(downpath, newdetail):
581
+ self._metadata.update_file(path, detail)
582
+ self.save_cache()
583
+
584
+ def firstpart(fn):
585
+ # helper to adapt both whole-file and simple-cache
586
+ return fn[1] if isinstance(fn, tuple) else fn
587
+
588
+ return [
589
+ open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode)
590
+ for fn0, fn1 in zip(details, downfn0)
591
+ ]
592
+
593
+ def commit_many(self, open_files):
594
+ self.fs.put([f.fn for f in open_files], [f.path for f in open_files])
595
+ [f.close() for f in open_files]
596
+ for f in open_files:
597
+ # in case autocommit is off, and so close did not already delete
598
+ try:
599
+ os.remove(f.name)
600
+ except FileNotFoundError:
601
+ pass
602
+ self._cache_size = None
603
+
604
+ def _make_local_details(self, path):
605
+ hash = self._mapper(path)
606
+ fn = os.path.join(self.storage[-1], hash)
607
+ detail = {
608
+ "original": path,
609
+ "fn": hash,
610
+ "blocks": True,
611
+ "time": time.time(),
612
+ "uid": self.fs.ukey(path),
613
+ }
614
+ self._metadata.update_file(path, detail)
615
+ logger.debug("Copying %s to local cache", path)
616
+ return fn
617
+
618
+ def cat(
619
+ self,
620
+ path,
621
+ recursive=False,
622
+ on_error="raise",
623
+ callback=DEFAULT_CALLBACK,
624
+ **kwargs,
625
+ ):
626
+ paths = self.expand_path(
627
+ path, recursive=recursive, maxdepth=kwargs.get("maxdepth", None)
628
+ )
629
+ getpaths = []
630
+ storepaths = []
631
+ fns = []
632
+ out = {}
633
+ for p in paths.copy():
634
+ try:
635
+ detail = self._check_file(p)
636
+ if not detail:
637
+ fn = self._make_local_details(p)
638
+ getpaths.append(p)
639
+ storepaths.append(fn)
640
+ else:
641
+ detail, fn = detail if isinstance(detail, tuple) else (None, detail)
642
+ fns.append(fn)
643
+ except Exception as e:
644
+ if on_error == "raise":
645
+ raise
646
+ if on_error == "return":
647
+ out[p] = e
648
+ paths.remove(p)
649
+
650
+ if getpaths:
651
+ self.fs.get(getpaths, storepaths)
652
+ self.save_cache()
653
+
654
+ callback.set_size(len(paths))
655
+ for p, fn in zip(paths, fns):
656
+ with open(fn, "rb") as f:
657
+ out[p] = f.read()
658
+ callback.relative_update(1)
659
+ if isinstance(path, str) and len(paths) == 1 and recursive is False:
660
+ out = out[paths[0]]
661
+ return out
662
+
663
+ def _open(self, path, mode="rb", **kwargs):
664
+ path = self._strip_protocol(path)
665
+ if "r" not in mode:
666
+ fn = self._make_local_details(path)
667
+ user_specified_kwargs = {
668
+ k: v
669
+ for k, v in kwargs.items()
670
+ # those kwargs were added by open(), we don't want them
671
+ if k not in ["autocommit", "block_size", "cache_options"]
672
+ }
673
+ return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
674
+ detail = self._check_file(path)
675
+ if detail:
676
+ detail, fn = detail
677
+ _, blocks = detail["fn"], detail["blocks"]
678
+ if blocks is True:
679
+ logger.debug("Opening local copy of %s", path)
680
+
681
+ # In order to support downstream filesystems to be able to
682
+ # infer the compression from the original filename, like
683
+ # the `TarFileSystem`, let's extend the `io.BufferedReader`
684
+ # fileobject protocol by adding a dedicated attribute
685
+ # `original`.
686
+ f = open(fn, mode)
687
+ f.original = detail.get("original")
688
+ return f
689
+ else:
690
+ raise ValueError(
691
+ f"Attempt to open partially cached file {path}"
692
+ f" as a wholly cached file"
693
+ )
694
+ else:
695
+ fn = self._make_local_details(path)
696
+ kwargs["mode"] = mode
697
+
698
+ # call target filesystems open
699
+ self._mkcache()
700
+ if self.compression:
701
+ with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
702
+ if isinstance(f, AbstractBufferedFile):
703
+ # want no type of caching if just downloading whole thing
704
+ f.cache = BaseCache(0, f.cache.fetcher, f.size)
705
+ comp = (
706
+ infer_compression(path)
707
+ if self.compression == "infer"
708
+ else self.compression
709
+ )
710
+ f = compr[comp](f, mode="rb")
711
+ data = True
712
+ while data:
713
+ block = getattr(f, "blocksize", 5 * 2**20)
714
+ data = f.read(block)
715
+ f2.write(data)
716
+ else:
717
+ self.fs.get_file(path, fn)
718
+ self.save_cache()
719
+ return self._open(path, mode)
720
+
721
+
722
+ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
723
+ """Caches whole remote files on first access
724
+
725
+ This class is intended as a layer over any other file system, and
726
+ will make a local copy of each file accessed, so that all subsequent
727
+ reads are local. This implementation only copies whole files, and
728
+ does not keep any metadata about the download time or file details.
729
+ It is therefore safer to use in multi-threaded/concurrent situations.
730
+
731
+ This is the only one of the caching filesystems that supports write: you will
732
+ be given a real local open file, and upon close and commit, it will be
733
+ uploaded to the target filesystem; the writability of the target URL is
734
+ not checked until that time.
735
+
736
+ """
737
+
738
+ protocol = "simplecache"
739
+ local_file = True
740
+ transaction_type = WriteCachedTransaction
741
+
742
+ def __init__(self, **kwargs):
743
+ kw = kwargs.copy()
744
+ for key in ["cache_check", "expiry_time", "check_files"]:
745
+ kw[key] = False
746
+ super().__init__(**kw)
747
+ for storage in self.storage:
748
+ if not os.path.exists(storage):
749
+ os.makedirs(storage, exist_ok=True)
750
+
751
+ def _check_file(self, path):
752
+ self._check_cache()
753
+ sha = self._mapper(path)
754
+ for storage in self.storage:
755
+ fn = os.path.join(storage, sha)
756
+ if os.path.exists(fn):
757
+ return fn
758
+
759
+ def save_cache(self):
760
+ pass
761
+
762
+ def load_cache(self):
763
+ pass
764
+
765
+ def pipe_file(self, path, value=None, **kwargs):
766
+ if self._intrans:
767
+ with self.open(path, "wb") as f:
768
+ f.write(value)
769
+ else:
770
+ super().pipe_file(path, value)
771
+
772
+ def ls(self, path, detail=True, **kwargs):
773
+ path = self._strip_protocol(path)
774
+ details = []
775
+ try:
776
+ details = self.fs.ls(
777
+ path, detail=True, **kwargs
778
+ ).copy() # don't edit original!
779
+ except FileNotFoundError as e:
780
+ ex = e
781
+ else:
782
+ ex = None
783
+ if self._intrans:
784
+ path1 = path.rstrip("/") + "/"
785
+ for f in self.transaction.files:
786
+ if f.path == path:
787
+ details.append(
788
+ {"name": path, "size": f.size or f.tell(), "type": "file"}
789
+ )
790
+ elif f.path.startswith(path1):
791
+ if f.path.count("/") == path1.count("/"):
792
+ details.append(
793
+ {"name": f.path, "size": f.size or f.tell(), "type": "file"}
794
+ )
795
+ else:
796
+ dname = "/".join(f.path.split("/")[: path1.count("/") + 1])
797
+ details.append({"name": dname, "size": 0, "type": "directory"})
798
+ if ex is not None and not details:
799
+ raise ex
800
+ if detail:
801
+ return details
802
+ return sorted(_["name"] for _ in details)
803
+
804
+ def info(self, path, **kwargs):
805
+ path = self._strip_protocol(path)
806
+ if self._intrans:
807
+ f = [_ for _ in self.transaction.files if _.path == path]
808
+ if f:
809
+ return {"name": path, "size": f[0].size or f[0].tell(), "type": "file"}
810
+ f = any(_.path.startswith(path + "/") for _ in self.transaction.files)
811
+ if f:
812
+ return {"name": path, "size": 0, "type": "directory"}
813
+ return self.fs.info(path, **kwargs)
814
+
815
+ def pipe(self, path, value=None, **kwargs):
816
+ if isinstance(path, str):
817
+ self.pipe_file(self._strip_protocol(path), value, **kwargs)
818
+ elif isinstance(path, dict):
819
+ for k, v in path.items():
820
+ self.pipe_file(self._strip_protocol(k), v, **kwargs)
821
+ else:
822
+ raise ValueError("path must be str or dict")
823
+
824
+ def cat_ranges(
825
+ self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
826
+ ):
827
+ lpaths = [self._check_file(p) for p in paths]
828
+ rpaths = [p for l, p in zip(lpaths, paths) if l is False]
829
+ lpaths = [l for l, p in zip(lpaths, paths) if l is False]
830
+ self.fs.get(rpaths, lpaths)
831
+ return super().cat_ranges(
832
+ paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
833
+ )
834
+
835
+ def _open(self, path, mode="rb", **kwargs):
836
+ path = self._strip_protocol(path)
837
+ sha = self._mapper(path)
838
+
839
+ if "r" not in mode:
840
+ fn = os.path.join(self.storage[-1], sha)
841
+ user_specified_kwargs = {
842
+ k: v
843
+ for k, v in kwargs.items()
844
+ if k not in ["autocommit", "block_size", "cache_options"]
845
+ } # those were added by open()
846
+ return LocalTempFile(
847
+ self,
848
+ path,
849
+ mode=mode,
850
+ autocommit=not self._intrans,
851
+ fn=fn,
852
+ **user_specified_kwargs,
853
+ )
854
+ fn = self._check_file(path)
855
+ if fn:
856
+ return open(fn, mode)
857
+
858
+ fn = os.path.join(self.storage[-1], sha)
859
+ logger.debug("Copying %s to local cache", path)
860
+ kwargs["mode"] = mode
861
+
862
+ self._mkcache()
863
+ self._cache_size = None
864
+ if self.compression:
865
+ with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
866
+ if isinstance(f, AbstractBufferedFile):
867
+ # want no type of caching if just downloading whole thing
868
+ f.cache = BaseCache(0, f.cache.fetcher, f.size)
869
+ comp = (
870
+ infer_compression(path)
871
+ if self.compression == "infer"
872
+ else self.compression
873
+ )
874
+ f = compr[comp](f, mode="rb")
875
+ data = True
876
+ while data:
877
+ block = getattr(f, "blocksize", 5 * 2**20)
878
+ data = f.read(block)
879
+ f2.write(data)
880
+ else:
881
+ self.fs.get_file(path, fn)
882
+ return self._open(path, mode)
883
+
884
+
885
+ class LocalTempFile:
886
+ """A temporary local file, which will be uploaded on commit"""
887
+
888
+ def __init__(self, fs, path, fn, mode="wb", autocommit=True, seek=0, **kwargs):
889
+ self.fn = fn
890
+ self.fh = open(fn, mode)
891
+ self.mode = mode
892
+ if seek:
893
+ self.fh.seek(seek)
894
+ self.path = path
895
+ self.size = None
896
+ self.fs = fs
897
+ self.closed = False
898
+ self.autocommit = autocommit
899
+ self.kwargs = kwargs
900
+
901
+ def __reduce__(self):
902
+ # always open in r+b to allow continuing writing at a location
903
+ return (
904
+ LocalTempFile,
905
+ (self.fs, self.path, self.fn, "r+b", self.autocommit, self.tell()),
906
+ )
907
+
908
+ def __enter__(self):
909
+ return self.fh
910
+
911
+ def __exit__(self, exc_type, exc_val, exc_tb):
912
+ self.close()
913
+
914
+ def close(self):
915
+ self.size = self.fh.tell()
916
+ if self.closed:
917
+ return
918
+ self.fh.close()
919
+ self.closed = True
920
+ if self.autocommit:
921
+ self.commit()
922
+
923
+ def discard(self):
924
+ self.fh.close()
925
+ os.remove(self.fn)
926
+
927
+ def commit(self):
928
+ self.fs.put(self.fn, self.path, **self.kwargs)
929
+ # we do not delete local copy - it's still in the cache
930
+
931
+ @property
932
+ def name(self):
933
+ return self.fn
934
+
935
+ def __repr__(self) -> str:
936
+ return f"LocalTempFile: {self.path}"
937
+
938
+ def __getattr__(self, item):
939
+ return getattr(self.fh, item)
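These classes are normally reached through their registered protocol names ("blockcache"/"cached", "filecache" and "simplecache" above) rather than instantiated directly. A minimal usage sketch, assuming a hypothetical HTTPS URL and placeholder cache directories:

import fsspec

# Whole-file cache: the first read downloads the file, later reads are local
fs = fsspec.filesystem(
    "filecache",
    target_protocol="https",
    cache_storage="/tmp/fsspec_filecache",          # placeholder cache directory
)
with fs.open("https://example.com/data.csv", "rb") as f:   # hypothetical URL
    header = f.read(1024)

# Block-wise cache: only the byte ranges actually read are stored, in a sparse file
bfs = fsspec.filesystem(
    "blockcache",
    target_protocol="https",
    cache_storage="/tmp/fsspec_blockcache",
)

# The same idea via URL chaining; simplecache keeps no metadata at all
with fsspec.open("simplecache::https://example.com/data.csv", "rb") as f:
    data = f.read()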
venv/lib/python3.10/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
+ def _get_client(client):
11
+ if client is None:
12
+ return _get_global_client()
13
+ elif isinstance(client, Client):
14
+ return client
15
+ else:
16
+ # e.g., connection string
17
+ return Client(client)
18
+
19
+
20
+ def _in_worker():
21
+ return bool(Worker._instances)
22
+
23
+
24
+ class DaskWorkerFileSystem(AbstractFileSystem):
25
+ """View files accessible to a worker as any other remote file-system
26
+
27
+ When instances are run on the worker, they use the real filesystem. When
28
+ run on the client, they call the worker to provide information or data.
29
+
30
+ **Warning** this implementation is experimental, and read-only for now.
31
+ """
32
+
33
+ def __init__(
34
+ self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
35
+ ):
36
+ super().__init__(**kwargs)
37
+ if not (fs is None) ^ (target_protocol is None):
38
+ raise ValueError(
39
+ "Please provide one of filesystem instance (fs) or"
40
+ " target_protocol, not both"
41
+ )
42
+ self.target_protocol = target_protocol
43
+ self.target_options = target_options
44
+ self.worker = None
45
+ self.client = client
46
+ self.fs = fs
47
+ self._determine_worker()
48
+
49
+ @staticmethod
50
+ def _get_kwargs_from_urls(path):
51
+ so = infer_storage_options(path)
52
+ if "host" in so and "port" in so:
53
+ return {"client": f"{so['host']}:{so['port']}"}
54
+ else:
55
+ return {}
56
+
57
+ def _determine_worker(self):
58
+ if _in_worker():
59
+ self.worker = True
60
+ if self.fs is None:
61
+ self.fs = filesystem(
62
+ self.target_protocol, **(self.target_options or {})
63
+ )
64
+ else:
65
+ self.worker = False
66
+ self.client = _get_client(self.client)
67
+ self.rfs = dask.delayed(self)
68
+
69
+ def mkdir(self, *args, **kwargs):
70
+ if self.worker:
71
+ self.fs.mkdir(*args, **kwargs)
72
+ else:
73
+ self.rfs.mkdir(*args, **kwargs).compute()
74
+
75
+ def rm(self, *args, **kwargs):
76
+ if self.worker:
77
+ self.fs.rm(*args, **kwargs)
78
+ else:
79
+ self.rfs.rm(*args, **kwargs).compute()
80
+
81
+ def copy(self, *args, **kwargs):
82
+ if self.worker:
83
+ self.fs.copy(*args, **kwargs)
84
+ else:
85
+ self.rfs.copy(*args, **kwargs).compute()
86
+
87
+ def mv(self, *args, **kwargs):
88
+ if self.worker:
89
+ self.fs.mv(*args, **kwargs)
90
+ else:
91
+ self.rfs.mv(*args, **kwargs).compute()
92
+
93
+ def ls(self, *args, **kwargs):
94
+ if self.worker:
95
+ return self.fs.ls(*args, **kwargs)
96
+ else:
97
+ return self.rfs.ls(*args, **kwargs).compute()
98
+
99
+ def _open(
100
+ self,
101
+ path,
102
+ mode="rb",
103
+ block_size=None,
104
+ autocommit=True,
105
+ cache_options=None,
106
+ **kwargs,
107
+ ):
108
+ if self.worker:
109
+ return self.fs._open(
110
+ path,
111
+ mode=mode,
112
+ block_size=block_size,
113
+ autocommit=autocommit,
114
+ cache_options=cache_options,
115
+ **kwargs,
116
+ )
117
+ else:
118
+ return DaskFile(
119
+ fs=self,
120
+ path=path,
121
+ mode=mode,
122
+ block_size=block_size,
123
+ autocommit=autocommit,
124
+ cache_options=cache_options,
125
+ **kwargs,
126
+ )
127
+
128
+ def fetch_range(self, path, mode, start, end):
129
+ if self.worker:
130
+ with self._open(path, mode) as f:
131
+ f.seek(start)
132
+ return f.read(end - start)
133
+ else:
134
+ return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
+ class DaskFile(AbstractBufferedFile):
138
+ def __init__(self, mode="rb", **kwargs):
139
+ if mode != "rb":
140
+ raise ValueError('Remote dask files can only be opened in "rb" mode')
141
+ super().__init__(**kwargs)
142
+
143
+ def _upload_chunk(self, final=False):
144
+ pass
145
+
146
+ def _initiate_upload(self):
147
+ """Create remote file/upload"""
148
+ pass
149
+
150
+ def _fetch_range(self, start, end):
151
+ """Get the specified set of bytes from remote"""
152
+ return self.fs.fetch_range(self.path, self.mode, start, end)
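A hedged sketch of using ``DaskWorkerFileSystem`` from a client process; the class is marked experimental and read-only above, and the scheduler address, target protocol and paths below are placeholders.

from fsspec.implementations.dask import DaskWorkerFileSystem

# On the client, calls are forwarded to a worker via dask.delayed(...).compute()
fs = DaskWorkerFileSystem(
    target_protocol="s3",
    target_options={"anon": True},
    client="tcp://scheduler-host:8786",   # placeholder scheduler address
)
print(fs.ls("my-bucket/some/prefix"))     # hypothetical bucket/prefix
with fs.open("my-bucket/some/prefix/part-0.parquet", "rb") as f:
    magic = f.read(4)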
venv/lib/python3.10/site-packages/fsspec/implementations/data.py ADDED
@@ -0,0 +1,58 @@
1
+ import base64
2
+ import io
3
+ from typing import Optional
4
+ from urllib.parse import unquote
5
+
6
+ from fsspec import AbstractFileSystem
7
+
8
+
9
+ class DataFileSystem(AbstractFileSystem):
10
+ """A handy decoder for data-URLs
11
+
12
+ Example
13
+ -------
14
+ >>> with fsspec.open("data:,Hello%2C%20World%21") as f:
15
+ ... print(f.read())
16
+ b"Hello, World!"
17
+
18
+ See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
19
+ """
20
+
21
+ protocol = "data"
22
+
23
+ def __init__(self, **kwargs):
24
+ """No parameters for this filesystem"""
25
+ super().__init__(**kwargs)
26
+
27
+ def cat_file(self, path, start=None, end=None, **kwargs):
28
+ pref, data = path.split(",", 1)
29
+ if pref.endswith("base64"):
30
+ return base64.b64decode(data)[start:end]
31
+ return unquote(data).encode()[start:end]
32
+
33
+ def info(self, path, **kwargs):
34
+ pref, name = path.split(",", 1)
35
+ data = self.cat_file(path)
36
+ mime = pref.split(":", 1)[1].split(";", 1)[0]
37
+ return {"name": name, "size": len(data), "type": "file", "mimetype": mime}
38
+
39
+ def _open(
40
+ self,
41
+ path,
42
+ mode="rb",
43
+ block_size=None,
44
+ autocommit=True,
45
+ cache_options=None,
46
+ **kwargs,
47
+ ):
48
+ if "r" not in mode:
49
+ raise ValueError("Read only filesystem")
50
+ return io.BytesIO(self.cat_file(path))
51
+
52
+ @staticmethod
53
+ def encode(data: bytes, mime: Optional[str] = None):
54
+ """Format the given data into data-URL syntax
55
+
56
+ This version always base64 encodes, even when the data is ascii/url-safe.
57
+ """
58
+ return f"data:{mime or ''};base64,{base64.b64encode(data).decode()}"
venv/lib/python3.10/site-packages/fsspec/implementations/dbfs.py ADDED
@@ -0,0 +1,467 @@
1
+ import base64
2
+ import urllib
3
+
4
+ import requests
5
+ import requests.exceptions
6
+ from requests.adapters import HTTPAdapter, Retry
7
+
8
+ from fsspec import AbstractFileSystem
9
+ from fsspec.spec import AbstractBufferedFile
10
+
11
+
12
+ class DatabricksException(Exception):
13
+ """
14
+ Helper class for exceptions raised in this module.
15
+ """
16
+
17
+ def __init__(self, error_code, message):
18
+ """Create a new DatabricksException"""
19
+ super().__init__(message)
20
+
21
+ self.error_code = error_code
22
+ self.message = message
23
+
24
+
25
+ class DatabricksFileSystem(AbstractFileSystem):
26
+ """
27
+ Get access to the Databricks filesystem implementation over HTTP.
28
+ Can be used inside and outside of a databricks cluster.
29
+ """
30
+
31
+ def __init__(self, instance, token, **kwargs):
32
+ """
33
+ Create a new DatabricksFileSystem.
34
+
35
+ Parameters
36
+ ----------
37
+ instance: str
38
+ The instance URL of the databricks cluster.
39
+ For example for an Azure databricks cluster, this
40
+ has the form adb-<some-number>.<two digits>.azuredatabricks.net.
41
+ token: str
42
+ Your personal token. Find out more
43
+ here: https://docs.databricks.com/dev-tools/api/latest/authentication.html
44
+ """
45
+ self.instance = instance
46
+ self.token = token
47
+ self.session = requests.Session()
48
+ self.retries = Retry(
49
+ total=10,
50
+ backoff_factor=0.05,
51
+ status_forcelist=[408, 429, 500, 502, 503, 504],
52
+ )
53
+
54
+ self.session.mount("https://", HTTPAdapter(max_retries=self.retries))
55
+ self.session.headers.update({"Authorization": f"Bearer {self.token}"})
56
+
57
+ super().__init__(**kwargs)
58
+
59
+ def ls(self, path, detail=True, **kwargs):
60
+ """
61
+ List the contents of the given path.
62
+
63
+ Parameters
64
+ ----------
65
+ path: str
66
+ Absolute path
67
+ detail: bool
68
+ Return not only the list of filenames,
69
+ but also additional information on file sizes
70
+ and types.
71
+ """
72
+ out = self._ls_from_cache(path)
73
+ if not out:
74
+ try:
75
+ r = self._send_to_api(
76
+ method="get", endpoint="list", json={"path": path}
77
+ )
78
+ except DatabricksException as e:
79
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
80
+ raise FileNotFoundError(e.message)
81
+
82
+ raise e
83
+ files = r["files"]
84
+ out = [
85
+ {
86
+ "name": o["path"],
87
+ "type": "directory" if o["is_dir"] else "file",
88
+ "size": o["file_size"],
89
+ }
90
+ for o in files
91
+ ]
92
+ self.dircache[path] = out
93
+
94
+ if detail:
95
+ return out
96
+ return [o["name"] for o in out]
97
+
98
+ def makedirs(self, path, exist_ok=True):
99
+ """
100
+ Create a given absolute path and all of its parents.
101
+
102
+ Parameters
103
+ ----------
104
+ path: str
105
+ Absolute path to create
106
+ exist_ok: bool
107
+ If false, checks if the folder
108
+ exists before creating it (and raises an
109
+ Exception if this is the case)
110
+ """
111
+ if not exist_ok:
112
+ try:
113
+ # If the following succeeds, the path is already present
114
+ self._send_to_api(
115
+ method="get", endpoint="get-status", json={"path": path}
116
+ )
117
+ raise FileExistsError(f"Path {path} already exists")
118
+ except DatabricksException as e:
119
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
120
+ pass
121
+
122
+ try:
123
+ self._send_to_api(method="post", endpoint="mkdirs", json={"path": path})
124
+ except DatabricksException as e:
125
+ if e.error_code == "RESOURCE_ALREADY_EXISTS":
126
+ raise FileExistsError(e.message)
127
+
128
+ raise e
129
+ self.invalidate_cache(self._parent(path))
130
+
131
+ def mkdir(self, path, create_parents=True, **kwargs):
132
+ """
133
+ Create a given absolute path and all of its parents.
134
+
135
+ Parameters
136
+ ----------
137
+ path: str
138
+ Absolute path to create
139
+ create_parents: bool
140
+ Whether to create all parents or not.
141
+ "False" is not implemented so far.
142
+ """
143
+ if not create_parents:
144
+ raise NotImplementedError
145
+
146
+ self.mkdirs(path, **kwargs)
147
+
148
+ def rm(self, path, recursive=False, **kwargs):
149
+ """
150
+ Remove the file or folder at the given absolute path.
151
+
152
+ Parameters
153
+ ----------
154
+ path: str
155
+ Absolute path what to remove
156
+ recursive: bool
157
+ Recursively delete all files in a folder.
158
+ """
159
+ try:
160
+ self._send_to_api(
161
+ method="post",
162
+ endpoint="delete",
163
+ json={"path": path, "recursive": recursive},
164
+ )
165
+ except DatabricksException as e:
166
+ # This is not really an exception, it just means
167
+ # not everything was deleted so far
168
+ if e.error_code == "PARTIAL_DELETE":
169
+ self.rm(path=path, recursive=recursive)
170
+ elif e.error_code == "IO_ERROR":
171
+ # Using the same exception as the os module would use here
172
+ raise OSError(e.message)
173
+
174
+ raise e
175
+ self.invalidate_cache(self._parent(path))
176
+
177
+ def mv(
178
+ self, source_path, destination_path, recursive=False, maxdepth=None, **kwargs
179
+ ):
180
+ """
181
+ Move a source to a destination path.
182
+
183
+ A note from the original [databricks API manual]
184
+ (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move).
185
+
186
+ When moving a large number of files the API call will time out after
187
+ approximately 60s, potentially resulting in partially moved data.
188
+ Therefore, for operations that move more than 10k files, we strongly
189
+ discourage using the DBFS REST API.
190
+
191
+ Parameters
192
+ ----------
193
+ source_path: str
194
+ From where to move (absolute path)
195
+ destination_path: str
196
+ To where to move (absolute path)
197
+ recursive: bool
198
+ Not implemented so far.
199
+ maxdepth:
200
+ Not implemented so far.
201
+ """
202
+ if recursive:
203
+ raise NotImplementedError
204
+ if maxdepth:
205
+ raise NotImplementedError
206
+
207
+ try:
208
+ self._send_to_api(
209
+ method="post",
210
+ endpoint="move",
211
+ json={"source_path": source_path, "destination_path": destination_path},
212
+ )
213
+ except DatabricksException as e:
214
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
215
+ raise FileNotFoundError(e.message)
216
+ elif e.error_code == "RESOURCE_ALREADY_EXISTS":
217
+ raise FileExistsError(e.message)
218
+
219
+ raise e
220
+ self.invalidate_cache(self._parent(source_path))
221
+ self.invalidate_cache(self._parent(destination_path))
222
+
223
+ def _open(self, path, mode="rb", block_size="default", **kwargs):
224
+ """
225
+ Overwrite the base class method to make sure to create a DBFile.
226
+ All arguments are copied from the base method.
227
+
228
+ Only the default blocksize is allowed.
229
+ """
230
+ return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs)
231
+
232
+ def _send_to_api(self, method, endpoint, json):
233
+ """
234
+ Send the given json to the DBFS API
235
+ using a get or post request (specified by the argument `method`).
236
+
237
+ Parameters
238
+ ----------
239
+ method: str
240
+ Which http method to use for communication; "get" or "post".
241
+ endpoint: str
242
+ Where to send the request to (last part of the API URL)
243
+ json: dict
244
+ Dictionary of information to send
245
+ """
246
+ if method == "post":
247
+ session_call = self.session.post
248
+ elif method == "get":
249
+ session_call = self.session.get
250
+ else:
251
+ raise ValueError(f"Do not understand method {method}")
252
+
253
+ url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint)
254
+
255
+ r = session_call(url, json=json)
256
+
257
+ # The DBFS API will return a json, also in case of an exception.
258
+ # We want to preserve this information as well as possible.
259
+ try:
260
+ r.raise_for_status()
261
+ except requests.HTTPError as e:
262
+ # try to extract json error message
263
+ # if that fails, fall back to the original exception
264
+ try:
265
+ exception_json = e.response.json()
266
+ except Exception:
267
+ raise e
268
+
269
+ raise DatabricksException(**exception_json)
270
+
271
+ return r.json()
272
+
273
+ def _create_handle(self, path, overwrite=True):
274
+ """
275
+ Internal function to create a handle, which can be used to
276
+ write blocks of a file to DBFS.
277
+ A handle has a unique identifier which needs to be passed
278
+ whenever written during this transaction.
279
+ The handle is active for 10 minutes - after that a new
280
+ write transaction needs to be created.
281
+ Make sure to close the handle after you are finished.
282
+
283
+ Parameters
284
+ ----------
285
+ path: str
286
+ Absolute path for this file.
287
+ overwrite: bool
288
+ If a file already exists at this location, either overwrite
289
+ it or raise an exception.
290
+ """
291
+ try:
292
+ r = self._send_to_api(
293
+ method="post",
294
+ endpoint="create",
295
+ json={"path": path, "overwrite": overwrite},
296
+ )
297
+ return r["handle"]
298
+ except DatabricksException as e:
299
+ if e.error_code == "RESOURCE_ALREADY_EXISTS":
300
+ raise FileExistsError(e.message)
301
+
302
+ raise e
303
+
304
+ def _close_handle(self, handle):
305
+ """
306
+ Close a handle, which was opened by :func:`_create_handle`.
307
+
308
+ Parameters
309
+ ----------
310
+ handle: str
311
+ Which handle to close.
312
+ """
313
+ try:
314
+ self._send_to_api(method="post", endpoint="close", json={"handle": handle})
315
+ except DatabricksException as e:
316
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
317
+ raise FileNotFoundError(e.message)
318
+
319
+ raise e
320
+
321
+ def _add_data(self, handle, data):
322
+ """
323
+ Upload data to an already opened file handle
324
+ (opened by :func:`_create_handle`).
325
+ The maximal allowed data size is 1MB after
326
+ conversion to base64.
327
+ Remember to close the handle when you are finished.
328
+
329
+ Parameters
330
+ ----------
331
+ handle: str
332
+ Which handle to upload data to.
333
+ data: bytes
334
+ Block of data to add to the handle.
335
+ """
336
+ data = base64.b64encode(data).decode()
337
+ try:
338
+ self._send_to_api(
339
+ method="post",
340
+ endpoint="add-block",
341
+ json={"handle": handle, "data": data},
342
+ )
343
+ except DatabricksException as e:
344
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
345
+ raise FileNotFoundError(e.message)
346
+ elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED":
347
+ raise ValueError(e.message)
348
+
349
+ raise e
350
+
351
+ def _get_data(self, path, start, end):
352
+ """
353
+ Download data in bytes from a given absolute path in a block
354
+ from [start, start+length].
355
+ The maximum number of allowed bytes to read is 1MB.
356
+
357
+ Parameters
358
+ ----------
359
+ path: str
360
+ Absolute path to download data from
361
+ start: int
362
+ Start position of the block
363
+ end: int
364
+ End position of the block
365
+ """
366
+ try:
367
+ r = self._send_to_api(
368
+ method="get",
369
+ endpoint="read",
370
+ json={"path": path, "offset": start, "length": end - start},
371
+ )
372
+ return base64.b64decode(r["data"])
373
+ except DatabricksException as e:
374
+ if e.error_code == "RESOURCE_DOES_NOT_EXIST":
375
+ raise FileNotFoundError(e.message)
376
+ elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]:
377
+ raise ValueError(e.message)
378
+
379
+ raise e
380
+
381
+ def invalidate_cache(self, path=None):
382
+ if path is None:
383
+ self.dircache.clear()
384
+ else:
385
+ self.dircache.pop(path, None)
386
+ super().invalidate_cache(path)
387
+
388
+
389
+ class DatabricksFile(AbstractBufferedFile):
390
+ """
391
+ Helper class for files referenced in the DatabricksFileSystem.
392
+ """
393
+
394
+ DEFAULT_BLOCK_SIZE = 1 * 2**20 # only allowed block size
395
+
396
+ def __init__(
397
+ self,
398
+ fs,
399
+ path,
400
+ mode="rb",
401
+ block_size="default",
402
+ autocommit=True,
403
+ cache_type="readahead",
404
+ cache_options=None,
405
+ **kwargs,
406
+ ):
407
+ """
408
+ Create a new instance of the DatabricksFile.
409
+
410
+ The blocksize needs to be the default one.
411
+ """
412
+ if block_size is None or block_size == "default":
413
+ block_size = self.DEFAULT_BLOCK_SIZE
414
+
415
+ assert (
416
+ block_size == self.DEFAULT_BLOCK_SIZE
417
+ ), f"Only the default block size is allowed, not {block_size}"
418
+
419
+ super().__init__(
420
+ fs,
421
+ path,
422
+ mode=mode,
423
+ block_size=block_size,
424
+ autocommit=autocommit,
425
+ cache_type=cache_type,
426
+ cache_options=cache_options or {},
427
+ **kwargs,
428
+ )
429
+
430
+ def _initiate_upload(self):
431
+ """Internal function to start a file upload"""
432
+ self.handle = self.fs._create_handle(self.path)
433
+
434
+ def _upload_chunk(self, final=False):
435
+ """Internal function to add a chunk of data to a started upload"""
436
+ self.buffer.seek(0)
437
+ data = self.buffer.getvalue()
438
+
439
+ data_chunks = [
440
+ data[start:end] for start, end in self._to_sized_blocks(len(data))
441
+ ]
442
+
443
+ for data_chunk in data_chunks:
444
+ self.fs._add_data(handle=self.handle, data=data_chunk)
445
+
446
+ if final:
447
+ self.fs._close_handle(handle=self.handle)
448
+ return True
449
+
450
+ def _fetch_range(self, start, end):
451
+ """Internal function to download a block of data"""
452
+ return_buffer = b""
453
+ length = end - start
454
+ for chunk_start, chunk_end in self._to_sized_blocks(length, start):
455
+ return_buffer += self.fs._get_data(
456
+ path=self.path, start=chunk_start, end=chunk_end
457
+ )
458
+
459
+ return return_buffer
460
+
461
+ def _to_sized_blocks(self, length, start=0):
462
+ """Helper function to split a range from 0 to total_length into bloksizes"""
463
+ end = start + length
464
+ for data_chunk in range(start, end, self.blocksize):
465
+ data_start = data_chunk
466
+ data_end = min(end, data_chunk + self.blocksize)
467
+ yield data_start, data_end
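Finally, a hedged sketch of talking to DBFS with the class above; the instance URL, token and paths are placeholders, and writes stream through the create/add-block/close handle endpoints shown in the helpers.

from fsspec.implementations.dbfs import DatabricksFileSystem

fs = DatabricksFileSystem(
    instance="adb-1234567890123456.7.azuredatabricks.net",  # placeholder instance
    token="dapiXXXXXXXXXXXXXXXX",                           # placeholder token
)
print(fs.ls("/FileStore"))                 # listing goes through the "list" endpoint

# Uploads are sent as base64-encoded blocks of at most 1 MB each
with fs.open("/FileStore/example.txt", "wb") as f:          # hypothetical path
    f.write(b"hello from fsspec")

print(fs.cat_file("/FileStore/example.txt"))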