applied-ai-018 committed on
Commit 14de04b · verified · 1 parent: 4fac1bc

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_01_optim_states.pt +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_146_mp_rank_01_optim_states.pt +3 -0
  3. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_231_mp_rank_00_optim_states.pt +3 -0
  4. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_244_mp_rank_03_optim_states.pt +3 -0
  5. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_80_mp_rank_03_optim_states.pt +3 -0
  6. venv/lib/python3.10/site-packages/examples/__init__.py +0 -0
  7. venv/lib/python3.10/site-packages/examples/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/examples/__pycache__/basic_example.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/examples/basic_example.py +60 -0
  10. venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py +0 -0
  15. venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/huggingface_hub/inference/_client.py +0 -0
  17. venv/lib/python3.10/site-packages/huggingface_hub/inference/_common.py +482 -0
  18. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
  21. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
  22. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
  23. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
  24. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
  25. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
  26. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py +55 -0
  27. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py +46 -0
  28. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
  29. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py +161 -0
  30. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
  31. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/token_classification.py +53 -0
  32. venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
  33. venv/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py +105 -0
  34. venv/lib/python3.10/site-packages/huggingface_hub/inference/_types.py +52 -0
  35. venv/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py +122 -0
  36. venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py +135 -0
  41. venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py +813 -0
  42. venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py +136 -0
  43. venv/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py +397 -0
  44. venv/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py +66 -0
  45. venv/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py +93 -0
  46. venv/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py +121 -0
  47. venv/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py +241 -0
  48. venv/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py +96 -0
  49. venv/lib/python3.10/site-packages/huggingface_hub/utils/_http.py +321 -0
  50. venv/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py +52 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e2270217a6beedf39e3ceaf34c23c471933c3cbfd459c2a6a65c9d489ab829e
+size 41830138
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_146_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de11eef1689266966a61852ddd9640b55f8aeec0a7d78c5f61eb5c0b6c234dba
+size 41830212
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_231_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5371e49b761b2e1ade3343ff95bdda562973d333951ba9ced82c2317903e63b2
+size 41830148
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_244_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09970cc0b8f7909e6b1f4b833b8363b2a8176a2aa1c8dadbf799e0d7759fd307
+size 41830340
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_80_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f30d12eb1bb108cd8b5fb9939f1fe4c9cef5e889f27b506d012b61ff5329b2b1
+size 41830330
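Note: the five optimizer-state shards above are stored as Git LFS pointer stubs; only the spec version, SHA-256 object id, and byte size live in the repository, while the ~41.8 MB payloads sit in LFS storage. A minimal sketch of reading such a stub (hypothetical helper, not part of this commit):

    def parse_lfs_pointer(text: str) -> dict:
        # Each pointer line is "<key> <value>" per the git-lfs spec shown above.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"]),  # payload size in bytes
        }

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:f30d12eb1bb108cd8b5fb9939f1fe4c9cef5e889f27b506d012b61ff5329b2b1
    size 41830330
    """
    print(parse_lfs_pointer(pointer))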
venv/lib/python3.10/site-packages/examples/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/examples/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes).
venv/lib/python3.10/site-packages/examples/__pycache__/basic_example.cpython-310.pyc ADDED
Binary file (2.17 kB).
venv/lib/python3.10/site-packages/examples/basic_example.py ADDED
@@ -0,0 +1,60 @@
+from time import sleep
+import multiprocessing
+
+import logging
+from tqdm_multiprocess.logger import setup_logger_tqdm
+logger = logging.getLogger(__name__)
+
+from tqdm_multiprocess import TqdmMultiProcessPool
+
+def some_other_function(tqdm_func):
+    iterations1 = 100
+    iterations2 = 5
+    iterations3 = 2
+
+    total_iterations = iterations1 * iterations2 * iterations3
+    with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress3:
+        progress3.set_description("outer")
+        for i in range(iterations3):
+            logger.info("outer")
+            total_iterations = iterations1 * iterations2
+            with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress2:
+                progress2.set_description("middle")
+                for j in range(iterations2):
+                    logger.info("middle")
+                    #for k in tqdm_func(range(iterations1), dynamic_ncols=True, desc="inner"):
+                    with tqdm_func(total=iterations1, dynamic_ncols=True) as progress1:
+                        for j in range(iterations1):
+                            # logger.info("inner") # Spam slows down tqdm too much
+                            progress1.set_description("innert")
+                            sleep(0.01)
+                            progress1.update()
+                            progress2.update()
+                            progress3.update()
+
+    logger.warning(f"Warning test message. {multiprocessing.current_process().name}")
+    logger.error(f"Error test message. {multiprocessing.current_process().name}")
+
+
+# Multiprocessed
+def example_multiprocessing_function(some_input, tqdm_func):
+    logger.debug(f"Debug test message - I won't show up in console. {multiprocessing.current_process().name}")
+    logger.info(f"Info test message. {multiprocessing.current_process().name}")
+    some_other_function(tqdm_func)
+    return True
+
+def error_callback():
+    print("Error!")
+
+def example():
+    pool = TqdmMultiProcessPool()
+    process_count = 4
+    task_count = 10
+    initial_tasks = [(example_multiprocessing_function, (i,)) for i in range(task_count)]
+    results = pool.map(process_count, initial_tasks, error_callback)
+    print(results)
+
+if __name__ == '__main__':
+    logfile_path = "tqdm_multiprocessing_example.log"
+    setup_logger_tqdm(logfile_path)  # Logger will write messages using tqdm.write
+    example()
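For reference, the pattern the file above demonstrates can be reduced to a few lines. This sketch simply mirrors the API usage shown in that example (TqdmMultiProcessPool(), pool.map(process_count, tasks, error_callback)); the task body and values are illustrative only:

    from tqdm_multiprocess import TqdmMultiProcessPool

    def task(value, tqdm_func):
        # tqdm_func is injected by the pool so worker-side progress bars render cleanly
        with tqdm_func(total=10, dynamic_ncols=True) as bar:
            for _ in range(10):
                bar.update()
        return value * 2

    if __name__ == "__main__":
        pool = TqdmMultiProcessPool()
        tasks = [(task, (i,)) for i in range(4)]
        print(pool.map(2, tasks, lambda: print("Error!")))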
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc ADDED
Binary file (13.9 kB).
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc ADDED
Binary file (23.9 kB).
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc ADDED
Binary file (24.6 kB).
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc ADDED
Binary file (29.5 kB).
venv/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc ADDED
Binary file (1.5 kB).
venv/lib/python3.10/site-packages/huggingface_hub/inference/_client.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/huggingface_hub/inference/_common.py ADDED
@@ -0,0 +1,482 @@
+# coding=utf-8
+# Copyright 2023-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities used by both the sync and async inference clients."""
+
+import base64
+import io
+import json
+import logging
+import time
+from contextlib import contextmanager
+from dataclasses import dataclass
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterable,
+    BinaryIO,
+    ContextManager,
+    Dict,
+    Generator,
+    Iterable,
+    List,
+    Literal,
+    NoReturn,
+    Optional,
+    Set,
+    Union,
+    overload,
+)
+
+from requests import HTTPError
+
+from huggingface_hub.errors import (
+    GenerationError,
+    IncompleteGenerationError,
+    OverloadedError,
+    TextGenerationError,
+    UnknownError,
+    ValidationError,
+)
+
+from ..constants import ENDPOINT
+from ..utils import (
+    build_hf_headers,
+    get_session,
+    hf_raise_for_status,
+    is_aiohttp_available,
+    is_numpy_available,
+    is_pillow_available,
+)
+from ._generated.types import (
+    ChatCompletionStreamOutput,
+    ChatCompletionStreamOutputChoice,
+    ChatCompletionStreamOutputDelta,
+    TextGenerationStreamOutput,
+)
+
+
+if TYPE_CHECKING:
+    from aiohttp import ClientResponse, ClientSession
+    from PIL import Image
+
+# TYPES
+UrlT = str
+PathT = Union[str, Path]
+BinaryT = Union[bytes, BinaryIO]
+ContentT = Union[BinaryT, PathT, UrlT]
+
+# Use to set a Accept: image/png header
+TASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}
+
+logger = logging.getLogger(__name__)
+
+
+# Add dataclass for ModelStatus. We use this dataclass in get_model_status function.
+@dataclass
+class ModelStatus:
+    """
+    This Dataclass represents the the model status in the Hugging Face Inference API.
+
+    Args:
+        loaded (`bool`):
+            If the model is currently loaded into Hugging Face's InferenceAPI. Models
+            are loaded on-demand, leading to the user's first request taking longer.
+            If a model is loaded, you can be assured that it is in a healthy state.
+        state (`str`):
+            The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.
+            If a model's state is 'Loadable', it's not too big and has a supported
+            backend. Loadable models are automatically loaded when the user first
+            requests inference on the endpoint. This means it is transparent for the
+            user to load a model, except that the first call takes longer to complete.
+        compute_type (`Dict`):
+            Information about the compute resource the model is using or will use, such as 'gpu' type and number of
+            replicas.
+        framework (`str`):
+            The name of the framework that the model was built with, such as 'transformers'
+            or 'text-generation-inference'.
+    """
+
+    loaded: bool
+    state: str
+    compute_type: Dict
+    framework: str
+
+
+## IMPORT UTILS
+
+
+def _import_aiohttp():
+    # Make sure `aiohttp` is installed on the machine.
+    if not is_aiohttp_available():
+        raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")
+    import aiohttp
+
+    return aiohttp
+
+
+def _import_numpy():
+    """Make sure `numpy` is installed on the machine."""
+    if not is_numpy_available():
+        raise ImportError("Please install numpy to use deal with embeddings (`pip install numpy`).")
+    import numpy
+
+    return numpy
+
+
+def _import_pil_image():
+    """Make sure `PIL` is installed on the machine."""
+    if not is_pillow_available():
+        raise ImportError(
+            "Please install Pillow to use deal with images (`pip install Pillow`). If you don't want the image to be"
+            " post-processed, use `client.post(...)` and get the raw response from the server."
+        )
+    from PIL import Image
+
+    return Image
+
+
+## RECOMMENDED MODELS
+
+# Will be globally fetched only once (see '_fetch_recommended_models')
+_RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None
+
+
+def _fetch_recommended_models() -> Dict[str, Optional[str]]:
+    global _RECOMMENDED_MODELS
+    if _RECOMMENDED_MODELS is None:
+        response = get_session().get(f"{ENDPOINT}/api/tasks", headers=build_hf_headers())
+        hf_raise_for_status(response)
+        _RECOMMENDED_MODELS = {
+            task: _first_or_none(details["widgetModels"]) for task, details in response.json().items()
+        }
+    return _RECOMMENDED_MODELS
+
+
+def _first_or_none(items: List[Any]) -> Optional[Any]:
+    try:
+        return items[0] or None
+    except IndexError:
+        return None
+
+
+## ENCODING / DECODING UTILS
+
+
+@overload
+def _open_as_binary(
+    content: ContentT,
+) -> ContextManager[BinaryT]: ...  # means "if input is not None, output is not None"
+
+
+@overload
+def _open_as_binary(
+    content: Literal[None],
+) -> ContextManager[Literal[None]]: ...  # means "if input is None, output is None"
+
+
+@contextmanager  # type: ignore
+def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]:
+    """Open `content` as a binary file, either from a URL, a local path, or raw bytes.
+
+    Do nothing if `content` is None,
+
+    TODO: handle a PIL.Image as input
+    TODO: handle base64 as input
+    """
+    # If content is a string => must be either a URL or a path
+    if isinstance(content, str):
+        if content.startswith("https://") or content.startswith("http://"):
+            logger.debug(f"Downloading content from {content}")
+            yield get_session().get(content).content  # TODO: retrieve as stream and pipe to post request ?
+            return
+        content = Path(content)
+        if not content.exists():
+            raise FileNotFoundError(
+                f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local"
+                " file. To pass raw content, please encode it as bytes first."
+            )
+
+    # If content is a Path => open it
+    if isinstance(content, Path):
+        logger.debug(f"Opening content from {content}")
+        with content.open("rb") as f:
+            yield f
+    else:
+        # Otherwise: already a file-like object or None
+        yield content
+
+
+def _b64_encode(content: ContentT) -> str:
+    """Encode a raw file (image, audio) into base64. Can be byes, an opened file, a path or a URL."""
+    with _open_as_binary(content) as data:
+        data_as_bytes = data if isinstance(data, bytes) else data.read()
+        return base64.b64encode(data_as_bytes).decode()
+
+
+def _b64_to_image(encoded_image: str) -> "Image":
+    """Parse a base64-encoded string into a PIL Image."""
+    Image = _import_pil_image()
+    return Image.open(io.BytesIO(base64.b64decode(encoded_image)))
+
+
+def _bytes_to_list(content: bytes) -> List:
+    """Parse bytes from a Response object into a Python list.
+
+    Expects the response body to be JSON-encoded data.
+
+    NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a
+    dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+    """
+    return json.loads(content.decode())
+
+
+def _bytes_to_dict(content: bytes) -> Dict:
+    """Parse bytes from a Response object into a Python dictionary.
+
+    Expects the response body to be JSON-encoded data.
+
+    NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a
+    list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+    """
+    return json.loads(content.decode())
+
+
+def _bytes_to_image(content: bytes) -> "Image":
+    """Parse bytes from a Response object into a PIL Image.
+
+    Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead.
+    """
+    Image = _import_pil_image()
+    return Image.open(io.BytesIO(content))
+
+
+## STREAMING UTILS
+
+
+def _stream_text_generation_response(
+    bytes_output_as_lines: Iterable[bytes], details: bool
+) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
+    """Used in `InferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    for byte_payload in bytes_output_as_lines:
+        output = _format_text_generation_stream_output(byte_payload, details)
+        if output is not None:
+            yield output
+
+
+async def _async_stream_text_generation_response(
+    bytes_output_as_lines: AsyncIterable[bytes], details: bool
+) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
+    """Used in `AsyncInferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    async for byte_payload in bytes_output_as_lines:
+        output = _format_text_generation_stream_output(byte_payload, details)
+        if output is not None:
+            yield output
+
+
+def _format_text_generation_stream_output(
+    byte_payload: bytes, details: bool
+) -> Optional[Union[str, TextGenerationStreamOutput]]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
+
+    # Either an error as being returned
+    if json_payload.get("error") is not None:
+        raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+    # Or parse token payload
+    output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)
+    return output.token.text if not details else output
+
+
+def _stream_chat_completion_response_from_text_generation(
+    text_generation_output: Iterable[TextGenerationStreamOutput],
+) -> Iterable[ChatCompletionStreamOutput]:
+    """Used in `InferenceClient.chat_completion`."""
+    created = int(time.time())
+    for item in text_generation_output:
+        yield _format_chat_completion_stream_output_from_text_generation(item, created)
+
+
+async def _async_stream_chat_completion_response_from_text_generation(
+    text_generation_output: AsyncIterable[TextGenerationStreamOutput],
+) -> AsyncIterable[ChatCompletionStreamOutput]:
+    """Used in `AsyncInferenceClient.chat_completion`."""
+    created = int(time.time())
+    async for item in text_generation_output:
+        yield _format_chat_completion_stream_output_from_text_generation(item, created)
+
+
+def _format_chat_completion_stream_output_from_text_generation(
+    item: TextGenerationStreamOutput, created: int
+) -> ChatCompletionStreamOutput:
+    if item.details is None:
+        # new token generated => return delta
+        return ChatCompletionStreamOutput(
+            choices=[
+                ChatCompletionStreamOutputChoice(
+                    delta=ChatCompletionStreamOutputDelta(
+                        role="assistant",
+                        content=item.token.text,
+                    ),
+                    finish_reason=None,
+                    index=0,
+                )
+            ],
+            created=created,
+        )
+    else:
+        # generation is completed => return finish reason
+        return ChatCompletionStreamOutput(
+            choices=[
+                ChatCompletionStreamOutputChoice(
+                    delta=ChatCompletionStreamOutputDelta(),
+                    finish_reason=item.details.finish_reason,
+                    index=0,
+                )
+            ],
+            created=created,
+        )
+
+
+def _stream_chat_completion_response_from_bytes(
+    bytes_lines: Iterable[bytes],
+) -> Iterable[ChatCompletionStreamOutput]:
+    """Used in `InferenceClient.chat_completion` if model is served with TGI."""
+    for item in bytes_lines:
+        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
+        if output is not None:
+            yield output
+
+
+async def _async_stream_chat_completion_response_from_bytes(
+    bytes_lines: AsyncIterable[bytes],
+) -> AsyncIterable[ChatCompletionStreamOutput]:
+    """Used in `AsyncInferenceClient.chat_completion`."""
+    async for item in bytes_lines:
+        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
+        if output is not None:
+            yield output
+
+
+def _format_chat_completion_stream_output_from_text_generation_from_bytes(
+    byte_payload: bytes,
+) -> Optional[ChatCompletionStreamOutput]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
+    return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)
+
+
+async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
+    async for byte_payload in response.content:
+        yield byte_payload
+    await client.close()
+
+
+# "TGI servers" are servers running with the `text-generation-inference` backend.
+# This backend is the go-to solution to run large language models at scale. However,
+# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`
+# solution is still in use.
+#
+# Both approaches have very similar APIs, but not exactly the same. What we do first in
+# the `text_generation` method is to assume the model is served via TGI. If we realize
+# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fallback to the
+# default API with a warning message. We remember for each model if it's a TGI server
+# or not using `_NON_TGI_SERVERS` global variable.
+#
+# In addition, TGI servers have a built-in API route for chat-completion, which is not
+# available on the default API. We use this route to provide a more consistent behavior
+# when available.
+#
+# For more details, see https://github.com/huggingface/text-generation-inference and
+# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.
+
+_NON_TGI_SERVERS: Set[Optional[str]] = set()
+
+
+def _set_as_non_tgi(model: Optional[str]) -> None:
+    _NON_TGI_SERVERS.add(model)
+
+
+def _is_tgi_server(model: Optional[str]) -> bool:
+    return model not in _NON_TGI_SERVERS
+
+
+_NON_CHAT_COMPLETION_SERVER: Set[str] = set()
+
+
+def _set_as_non_chat_completion_server(model: str) -> None:
+    print("Set as non chat completion", model)
+    _NON_CHAT_COMPLETION_SERVER.add(model)
+
+
+def _is_chat_completion_server(model: str) -> bool:
+    return model not in _NON_CHAT_COMPLETION_SERVER
+
+
+# TEXT GENERATION ERRORS
+# ----------------------
+# Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation
+# inference project (https://github.com/huggingface/text-generation-inference).
+# ----------------------
+
+
+def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
+    """
+    Try to parse text-generation-inference error message and raise HTTPError in any case.
+
+    Args:
+        error (`HTTPError`):
+            The HTTPError that have been raised.
+    """
+    # Try to parse a Text Generation Inference error
+
+    try:
+        # Hacky way to retrieve payload in case of aiohttp error
+        payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()
+        error = payload.get("error")
+        error_type = payload.get("error_type")
+    except Exception:  # no payload
+        raise http_error
+
+    # If error_type => more information than `hf_raise_for_status`
+    if error_type is not None:
+        exception = _parse_text_generation_error(error, error_type)
+        raise exception from http_error
+
+    # Otherwise, fallback to default error
+    raise http_error
+
+
+def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:
+    if error_type == "generation":
+        return GenerationError(error)  # type: ignore
+    if error_type == "incomplete_generation":
+        return IncompleteGenerationError(error)  # type: ignore
+    if error_type == "overloaded":
+        return OverloadedError(error)  # type: ignore
+    if error_type == "validation":
+        return ValidationError(error)  # type: ignore
+    return UnknownError(error)  # type: ignore
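The error helpers at the end of this file map TGI's error_type strings onto the exception classes imported from huggingface_hub.errors. A small usage sketch, assuming this version of huggingface_hub is installed (the error message is made up):

    from huggingface_hub.inference._common import _parse_text_generation_error

    # "validation" is one of the error_type values handled above
    exc = _parse_text_generation_error("Input validation error: `max_new_tokens` too large", "validation")
    print(type(exc).__name__)  # -> ValidationError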
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes).
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc ADDED
Binary file (92.3 kB).
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py ADDED
@@ -0,0 +1,31 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class AudioToAudioInput(BaseInferenceType):
+    """Inputs for Audio to Audio inference"""
+
+    inputs: Any
+    """The input audio data"""
+
+
+@dataclass
+class AudioToAudioOutputElement(BaseInferenceType):
+    """Outputs of inference for the Audio To Audio task
+    A generated audio file with its label.
+    """
+
+    blob: Any
+    """The generated audio file."""
+    content_type: str
+    """The content type of audio file."""
+    label: str
+    """The label of the audio file."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py ADDED
@@ -0,0 +1,85 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional, Union
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class DocumentQuestionAnsweringInputData(BaseInferenceType):
+    """One (document, question) pair to answer"""
+
+    image: Any
+    """The image on which the question is asked"""
+    question: str
+    """A question to ask of the document"""
+
+
+@dataclass
+class DocumentQuestionAnsweringParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Document Question Answering
+    """
+
+    doc_stride: Optional[int] = None
+    """If the words in the document are too long to fit with the question for the model, it will
+    be split in several chunks with some overlap. This argument controls the size of that
+    overlap.
+    """
+    handle_impossible_answer: Optional[bool] = None
+    """Whether to accept impossible as an answer"""
+    lang: Optional[str] = None
+    """Language to use while running OCR. Defaults to english."""
+    max_answer_len: Optional[int] = None
+    """The maximum length of predicted answers (e.g., only answers with a shorter length are
+    considered).
+    """
+    max_question_len: Optional[int] = None
+    """The maximum length of the question after tokenization. It will be truncated if needed."""
+    max_seq_len: Optional[int] = None
+    """The maximum length of the total sentence (context + question) in tokens of each chunk
+    passed to the model. The context will be split in several chunks (using doc_stride as
+    overlap) if needed.
+    """
+    top_k: Optional[int] = None
+    """The number of answers to return (will be chosen by order of likelihood). Can return less
+    than top_k answers if there are not enough options available within the context.
+    """
+    word_boxes: Optional[List[Union[List[float], str]]] = None
+    """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
+    skip the OCR step and use the provided bounding boxes instead.
+    """
+
+
+@dataclass
+class DocumentQuestionAnsweringInput(BaseInferenceType):
+    """Inputs for Document Question Answering inference"""
+
+    inputs: DocumentQuestionAnsweringInputData
+    """One (document, question) pair to answer"""
+    parameters: Optional[DocumentQuestionAnsweringParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
+    """Outputs of inference for the Document Question Answering task"""
+
+    answer: str
+    """The answer to the question."""
+    end: int
+    """The end word index of the answer (in the OCR’d version of the input or provided word
+    boxes).
+    """
+    score: float
+    """The probability associated to the answer."""
+    start: int
+    """The start word index of the answer (in the OCR’d version of the input or provided word
+    boxes).
+    """
+    words: List[int]
+    """The index of each word/box pair that is in the answer"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py ADDED
@@ -0,0 +1,50 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class FillMaskParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Fill Mask
+    """
+
+    targets: Optional[List[str]] = None
+    """When passed, the model will limit the scores to the passed targets instead of looking up
+    in the whole vocabulary. If the provided targets are not in the model vocab, they will be
+    tokenized and the first resulting token will be used (with a warning, and that might be
+    slower).
+    """
+    top_k: Optional[int] = None
+    """When passed, overrides the number of predictions to return."""
+
+
+@dataclass
+class FillMaskInput(BaseInferenceType):
+    """Inputs for Fill Mask inference"""
+
+    inputs: str
+    """The text with masked tokens"""
+    parameters: Optional[FillMaskParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class FillMaskOutputElement(BaseInferenceType):
+    """Outputs of inference for the Fill Mask task"""
+
+    score: float
+    """The corresponding probability"""
+    sequence: str
+    """The corresponding input with the mask token prediction."""
+    token: int
+    """The predicted token id (to replace the masked one)."""
+    token_str: Any
+    fill_mask_output_token_str: Optional[str] = None
+    """The predicted token (to replace the masked one)."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py ADDED
@@ -0,0 +1,52 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]
+
+
+@dataclass
+class ImageSegmentationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image Segmentation
+    """
+
+    mask_threshold: Optional[float] = None
+    """Threshold to use when turning the predicted masks into binary values."""
+    overlap_mask_area_threshold: Optional[float] = None
+    """Mask overlap threshold to eliminate small, disconnected segments."""
+    subtask: Optional["ImageSegmentationSubtask"] = None
+    """Segmentation task to be performed, depending on model capabilities."""
+    threshold: Optional[float] = None
+    """Probability threshold to filter out predicted masks."""
+
+
+@dataclass
+class ImageSegmentationInput(BaseInferenceType):
+    """Inputs for Image Segmentation inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageSegmentationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageSegmentationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Image Segmentation task
+    A predicted mask / segment
+    """
+
+    label: str
+    """The label of the predicted segment"""
+    mask: Any
+    """The corresponding mask as a black-and-white image"""
+    score: Optional[float] = None
+    """The score or confidence degreee the model has"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py ADDED
@@ -0,0 +1,55 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ImageToImageTargetSize(BaseInferenceType):
+    """The size in pixel of the output image"""
+
+    height: int
+    width: int
+
+
+@dataclass
+class ImageToImageParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image To Image
+    """
+
+    guidance_scale: Optional[float] = None
+    """For diffusion models. A higher guidance scale value encourages the model to generate
+    images closely linked to the text prompt at the expense of lower image quality.
+    """
+    negative_prompt: Optional[List[str]] = None
+    """One or several prompt to guide what NOT to include in image generation."""
+    num_inference_steps: Optional[int] = None
+    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
+    a higher quality image at the expense of slower inference.
+    """
+    target_size: Optional[ImageToImageTargetSize] = None
+    """The size in pixel of the output image"""
+
+
+@dataclass
+class ImageToImageInput(BaseInferenceType):
+    """Inputs for Image To Image inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageToImageParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageToImageOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Image task"""
+
+    image: Any
+    """The output image"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py ADDED
@@ -0,0 +1,105 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional, Union
+
+from .base import BaseInferenceType
+
+
+EarlyStoppingEnum = Literal["never"]
+
+
+@dataclass
+class ImageToTextGenerationParameters(BaseInferenceType):
+    """Parametrization of the text generation process
+    Ad-hoc parametrization of the text generation process
+    """
+
+    do_sample: Optional[bool] = None
+    """Whether to use sampling instead of greedy decoding when generating new tokens."""
+    early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
+    """Controls the stopping condition for beam-based methods."""
+    epsilon_cutoff: Optional[float] = None
+    """If set to float strictly between 0 and 1, only tokens with a conditional probability
+    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+    """
+    eta_cutoff: Optional[float] = None
+    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+    float strictly between 0 and 1, a token is only considered if it is greater than either
+    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+    for more details.
+    """
+    max_length: Optional[int] = None
+    """The maximum length (in tokens) of the generated text, including the input."""
+    max_new_tokens: Optional[int] = None
+    """The maximum number of tokens to generate. Takes precedence over maxLength."""
+    min_length: Optional[int] = None
+    """The minimum length (in tokens) of the generated text, including the input."""
+    min_new_tokens: Optional[int] = None
+    """The minimum number of tokens to generate. Takes precedence over maxLength."""
+    num_beam_groups: Optional[int] = None
+    """Number of groups to divide num_beams into in order to ensure diversity among different
+    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+    """
+    num_beams: Optional[int] = None
+    """Number of beams to use for beam search."""
+    penalty_alpha: Optional[float] = None
+    """The value balances the model confidence and the degeneration penalty in contrastive
+    search decoding.
+    """
+    temperature: Optional[float] = None
+    """The value used to modulate the next token probabilities."""
+    top_k: Optional[int] = None
+    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+    top_p: Optional[float] = None
+    """If set to float < 1, only the smallest set of most probable tokens with probabilities
+    that add up to top_p or higher are kept for generation.
+    """
+    typical_p: Optional[float] = None
+    """Local typicality measures how similar the conditional probability of predicting a target
+    token next is to the expected conditional probability of predicting a random token next,
+    given the partial text already generated. If set to float < 1, the smallest set of the
+    most locally typical tokens with probabilities that add up to typical_p or higher are
+    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+    """
+    use_cache: Optional[bool] = None
+    """Whether the model should use the past last key/values attentions to speed up decoding"""
+
+
+@dataclass
+class ImageToTextParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image To Text
+    """
+
+    generate: Optional[ImageToTextGenerationParameters] = None
+    """Parametrization of the text generation process"""
+    max_new_tokens: Optional[int] = None
+    """The amount of maximum tokens to generate."""
+
+
+@dataclass
+class ImageToTextInput(BaseInferenceType):
+    """Inputs for Image To Text inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageToTextParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageToTextOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Text task"""
+
+    generated_text: Any
+    image_to_text_output_generated_text: Optional[str] = None
+    """The generated text."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py ADDED
@@ -0,0 +1,55 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ObjectDetectionParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Object Detection
+    """
+
+    threshold: Optional[float] = None
+    """The probability necessary to make a prediction."""
+
+
+@dataclass
+class ObjectDetectionInput(BaseInferenceType):
+    """Inputs for Object Detection inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ObjectDetectionParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ObjectDetectionBoundingBox(BaseInferenceType):
+    """The predicted bounding box. Coordinates are relative to the top left corner of the input
+    image.
+    """
+
+    xmax: int
+    xmin: int
+    ymax: int
+    ymin: int
+
+
+@dataclass
+class ObjectDetectionOutputElement(BaseInferenceType):
+    """Outputs of inference for the Object Detection task"""
+
+    box: ObjectDetectionBoundingBox
+    """The predicted bounding box. Coordinates are relative to the top left corner of the input
+    image.
+    """
+    label: str
+    """The predicted label for the bounding box"""
+    score: float
+    """The associated score / probability"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py ADDED
@@ -0,0 +1,46 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+SummarizationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
+
+
+@dataclass
+class SummarizationGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text2text Generation
+    """
+
+    clean_up_tokenization_spaces: Optional[bool] = None
+    """Whether to clean up the potential extra spaces in the text output."""
+    generate_parameters: Optional[Dict[str, Any]] = None
+    """Additional parametrization of the text generation algorithm"""
+    truncation: Optional["SummarizationGenerationTruncationStrategy"] = None
+    """The truncation strategy to use"""
+
+
+@dataclass
+class SummarizationInput(BaseInferenceType):
+    """Inputs for Summarization inference
+    Inputs for Text2text Generation inference
+    """
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[SummarizationGenerationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class SummarizationOutput(BaseInferenceType):
+    """Outputs of inference for the Summarization task"""
+
+    summary_text: str
+    """The summarized text."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py ADDED
@@ -0,0 +1,45 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class TableQuestionAnsweringInputData(BaseInferenceType):
+    """One (table, question) pair to answer"""
+
+    question: str
+    """The question to be answered about the table"""
+    table: Dict[str, List[str]]
+    """The table to serve as context for the questions"""
+
+
+@dataclass
+class TableQuestionAnsweringInput(BaseInferenceType):
+    """Inputs for Table Question Answering inference"""
+
+    inputs: TableQuestionAnsweringInputData
+    """One (table, question) pair to answer"""
+    parameters: Optional[Dict[str, Any]] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TableQuestionAnsweringOutputElement(BaseInferenceType):
+    """Outputs of inference for the Table Question Answering task"""
+
+    answer: str
+    """The answer of the question given the table. If there is an aggregator, the answer will be
+    preceded by `AGGREGATOR >`.
+    """
+    cells: List[str]
+    """List of strings made up of the answer cell values."""
+    coordinates: List[List[int]]
+    """Coordinates of the cells of the answers."""
+    aggregator: Optional[str] = None
+    """If the model has an aggregator, this returns the aggregator."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py ADDED
@@ -0,0 +1,161 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import List, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class TextGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text Generation
+    """
+
+    best_of: Optional[int] = None
+    """The number of sampling queries to run. Only the best one (in terms of total logprob) will
+    be returned.
+    """
+    decoder_input_details: Optional[bool] = None
+    """Whether or not to output decoder input details"""
+    details: Optional[bool] = None
+    """Whether or not to output details"""
+    do_sample: Optional[bool] = None
+    """Whether to use logits sampling instead of greedy decoding when generating new tokens."""
+    max_new_tokens: Optional[int] = None
+    """The maximum number of tokens to generate."""
+    repetition_penalty: Optional[float] = None
+    """The parameter for repetition penalty. A value of 1.0 means no penalty. See [this
+    paper](https://hf.co/papers/1909.05858) for more details.
+    """
+    return_full_text: Optional[bool] = None
+    """Whether to prepend the prompt to the generated text."""
+    seed: Optional[int] = None
+    """The random sampling seed."""
+    stop_sequences: Optional[List[str]] = None
+    """Stop generating tokens if a member of `stop_sequences` is generated."""
+    temperature: Optional[float] = None
+    """The value used to modulate the logits distribution."""
+    top_k: Optional[int] = None
+    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+    top_p: Optional[float] = None
+    """If set to < 1, only the smallest set of most probable tokens with probabilities that add
+    up to `top_p` or higher are kept for generation.
+    """
+    truncate: Optional[int] = None
+    """Truncate input tokens to the given size."""
+    typical_p: Optional[float] = None
+    """Typical Decoding mass. See [Typical Decoding for Natural Language
+    Generation](https://hf.co/papers/2202.00666) for more information
+    """
+    watermark: Optional[bool] = None
+    """Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)"""
+
+
+@dataclass
+class TextGenerationInput(BaseInferenceType):
+    """Inputs for Text Generation inference"""
+
+    inputs: str
+    """The text to initialize generation with"""
+    parameters: Optional[TextGenerationParameters] = None
+    """Additional inference parameters"""
+    stream: Optional[bool] = None
+    """Whether to stream output tokens"""
+
+
+TextGenerationFinishReason = Literal["length", "eos_token", "stop_sequence"]
+
+
+@dataclass
+class TextGenerationPrefillToken(BaseInferenceType):
+    id: int
+    logprob: float
+    text: str
+    """The text associated with that token"""
+
+
+@dataclass
+class TextGenerationOutputToken(BaseInferenceType):
+    """Generated token."""
+
+    id: int
+    special: bool
+    """Whether or not that token is a special one"""
+    text: str
+    """The text associated with that token"""
+    logprob: Optional[float] = None
+
+
+@dataclass
+class TextGenerationOutputSequenceDetails(BaseInferenceType):
+    finish_reason: "TextGenerationFinishReason"
+    generated_text: str
+    """The generated text"""
+    generated_tokens: int
+    """The number of generated tokens"""
+    prefill: List[TextGenerationPrefillToken]
+    tokens: List[TextGenerationOutputToken]
+    """The generated tokens and associated details"""
+    seed: Optional[int] = None
+    """The random seed used for generation"""
+    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    """Most likely tokens"""
+
+
+@dataclass
+class TextGenerationOutputDetails(BaseInferenceType):
+    """When enabled, details about the generation"""
+
+    finish_reason: "TextGenerationFinishReason"
+    """The reason why the generation was stopped."""
+    generated_tokens: int
+    """The number of generated tokens"""
+    prefill: List[TextGenerationPrefillToken]
+    tokens: List[TextGenerationOutputToken]
+    """The generated tokens and associated details"""
+    best_of_sequences: Optional[List[TextGenerationOutputSequenceDetails]] = None
+    """Details about additional sequences when best_of is provided"""
+    seed: Optional[int] = None
+    """The random seed used for generation"""
+    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    """Most likely tokens"""
+
+
+@dataclass
+class TextGenerationOutput(BaseInferenceType):
+    """Outputs for Text Generation inference"""
+
+    generated_text: str
+    """The generated text"""
+    details: Optional[TextGenerationOutputDetails] = None
+    """When enabled, details about the generation"""
+
+
+@dataclass
+class TextGenerationStreamDetails(BaseInferenceType):
+    """Generation details. Only available when the generation is finished."""
+
+    finish_reason: "TextGenerationFinishReason"
+    """The reason why the generation was stopped."""
+    generated_tokens: int
+    """The number of generated tokens"""
+    seed: int
+    """The random seed used for generation"""
+
+
+@dataclass
+class TextGenerationStreamOutput(BaseInferenceType):
+    """Text Generation Stream Output"""
+
+    token: TextGenerationOutputToken
+    """Generated token."""
+    details: Optional[TextGenerationStreamDetails] = None
+    """Generation details. Only available when the generation is finished."""
+    generated_text: Optional[str] = None
+    """The complete generated text. Only available when the generation is finished."""
+    index: Optional[int] = None
+    """The token index within the stream. Optional to support older clients that omit it."""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py ADDED
@@ -0,0 +1,57 @@
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from dataclasses import dataclass
7
+ from typing import Any, List, Optional
8
+
9
+ from .base import BaseInferenceType
10
+
11
+
12
+ @dataclass
13
+ class TextToImageTargetSize(BaseInferenceType):
14
+ """The size in pixel of the output image"""
15
+
16
+ height: int
17
+ width: int
18
+
19
+
20
+ @dataclass
21
+ class TextToImageParameters(BaseInferenceType):
22
+ """Additional inference parameters
23
+ Additional inference parameters for Text To Image
24
+ """
25
+
26
+ guidance_scale: Optional[float] = None
27
+ """For diffusion models. A higher guidance scale value encourages the model to generate
28
+ images closely linked to the text prompt at the expense of lower image quality.
29
+ """
30
+ negative_prompt: Optional[List[str]] = None
31
+ """One or several prompt to guide what NOT to include in image generation."""
32
+ num_inference_steps: Optional[int] = None
33
+ """For diffusion models. The number of denoising steps. More denoising steps usually lead to
34
+ a higher quality image at the expense of slower inference.
35
+ """
36
+ scheduler: Optional[str] = None
37
+ """For diffusion models. Override the scheduler with a compatible one"""
38
+ target_size: Optional[TextToImageTargetSize] = None
39
+ """The size in pixel of the output image"""
40
+
41
+
42
+ @dataclass
43
+ class TextToImageInput(BaseInferenceType):
44
+ """Inputs for Text To Image inference"""
45
+
46
+ inputs: str
47
+ """The input text data (sometimes called "prompt\""""
48
+ parameters: Optional[TextToImageParameters] = None
49
+ """Additional inference parameters"""
50
+
51
+
52
+ @dataclass
53
+ class TextToImageOutput(BaseInferenceType):
54
+ """Outputs of inference for the Text To Image task"""
55
+
56
+ image: Any
57
+ """The generated image"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/token_classification.py ADDED
@@ -0,0 +1,53 @@
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from dataclasses import dataclass
7
+ from typing import Any, List, Literal, Optional
8
+
9
+ from .base import BaseInferenceType
10
+
11
+
12
+ TokenClassificationAggregationStrategy = Literal["none", "simple", "first", "average", "max"]
13
+
14
+
15
+ @dataclass
16
+ class TokenClassificationParameters(BaseInferenceType):
17
+ """Additional inference parameters
18
+ Additional inference parameters for Token Classification
19
+ """
20
+
21
+ aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
22
+ """The strategy used to fuse tokens based on model predictions"""
23
+ ignore_labels: Optional[List[str]] = None
24
+ """A list of labels to ignore"""
25
+ stride: Optional[int] = None
26
+ """The number of overlapping tokens between chunks when splitting the input text."""
27
+
28
+
29
+ @dataclass
30
+ class TokenClassificationInput(BaseInferenceType):
31
+ """Inputs for Token Classification inference"""
32
+
33
+ inputs: str
34
+ """The input text data"""
35
+ parameters: Optional[TokenClassificationParameters] = None
36
+ """Additional inference parameters"""
37
+
38
+
39
+ @dataclass
40
+ class TokenClassificationOutputElement(BaseInferenceType):
41
+ """Outputs of inference for the Token Classification task"""
42
+
43
+ label: Any
44
+ score: float
45
+ """The associated score / probability"""
46
+ end: Optional[int] = None
47
+ """The character position in the input where this group ends."""
48
+ entity_group: Optional[str] = None
49
+ """The predicted label for that group of tokens"""
50
+ start: Optional[int] = None
51
+ """The character position in the input where this group begins."""
52
+ word: Optional[str] = None
53
+ """The corresponding text"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py ADDED
@@ -0,0 +1,53 @@
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from dataclasses import dataclass
7
+ from typing import Any, Optional
8
+
9
+ from .base import BaseInferenceType
10
+
11
+
12
+ @dataclass
13
+ class VisualQuestionAnsweringInputData(BaseInferenceType):
14
+ """One (image, question) pair to answer"""
15
+
16
+ image: Any
17
+ """The image."""
18
+ question: Any
19
+ """The question to answer based on the image."""
20
+
21
+
22
+ @dataclass
23
+ class VisualQuestionAnsweringParameters(BaseInferenceType):
24
+ """Additional inference parameters
25
+ Additional inference parameters for Visual Question Answering
26
+ """
27
+
28
+ top_k: Optional[int] = None
29
+ """The number of answers to return (will be chosen by order of likelihood). Note that we
30
+ return fewer than top_k answers if there are not enough options available within the
31
+ context.
32
+ """
33
+
34
+
35
+ @dataclass
36
+ class VisualQuestionAnsweringInput(BaseInferenceType):
37
+ """Inputs for Visual Question Answering inference"""
38
+
39
+ inputs: VisualQuestionAnsweringInputData
40
+ """One (image, question) pair to answer"""
41
+ parameters: Optional[VisualQuestionAnsweringParameters] = None
42
+ """Additional inference parameters"""
43
+
44
+
45
+ @dataclass
46
+ class VisualQuestionAnsweringOutputElement(BaseInferenceType):
47
+ """Outputs of inference for the Visual Question Answering task"""
48
+
49
+ label: Any
50
+ score: float
51
+ """The associated score / probability"""
52
+ answer: Optional[str] = None
53
+ """The answer to the question"""
venv/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py ADDED
@@ -0,0 +1,105 @@
1
+ from functools import lru_cache
2
+ from typing import Callable, Dict, List, Optional, Union
3
+
4
+ from ..utils import HfHubHTTPError, RepositoryNotFoundError, is_minijinja_available
5
+
6
+
7
+ class TemplateError(Exception):
8
+ """Any error raised while trying to fetch or render a chat template."""
9
+
10
+
11
+ def _import_minijinja():
12
+ if not is_minijinja_available():
13
+ raise ImportError("Cannot render template. Please install minijinja using `pip install minijinja`.")
14
+ import minijinja # noqa: F401
15
+
16
+ return minijinja
17
+
18
+
19
+ def render_chat_prompt(
20
+ *,
21
+ model_id: str,
22
+ messages: List[Dict[str, str]],
23
+ token: Union[str, bool, None] = None,
24
+ add_generation_prompt: bool = True,
25
+ **kwargs,
26
+ ) -> str:
27
+ """Render a chat prompt using a model's chat template.
28
+
29
+ Args:
30
+ model_id (`str`):
31
+ The model id.
32
+ messages (`List[Dict[str, str]]`):
33
+ The list of messages to render.
34
+ token (`str` or `bool`, *optional*):
35
+ Hugging Face token. Will default to the locally saved token if not provided.
36
+
37
+ Returns:
38
+ `str`: The rendered chat prompt.
39
+
40
+ Raises:
41
+ `TemplateError`: If there's any issue while fetching, compiling or rendering the chat template.
42
+ """
43
+ minijinja = _import_minijinja()
44
+ template = _fetch_and_compile_template(model_id=model_id, token=token)
45
+
46
+ try:
47
+ return template(messages=messages, add_generation_prompt=add_generation_prompt, **kwargs)
48
+ except minijinja.TemplateError as e:
49
+ raise TemplateError(f"Error while trying to render chat prompt for model '{model_id}': {e}") from e
50
+
51
+
52
+ @lru_cache # TODO: lru_cache for raised exceptions
53
+ def _fetch_and_compile_template(*, model_id: str, token: Union[str, None]) -> Callable:
54
+ """Fetch and compile a model's chat template.
55
+
56
+ Method is cached to avoid fetching the same model's config multiple times.
57
+
58
+ Args:
59
+ model_id (`str`):
60
+ The model id.
61
+ token (`str` or `bool`, *optional*):
62
+ Hugging Face token. Will default to the locally saved token if not provided.
63
+
64
+ Returns:
65
+ `Callable`: A callable that takes a list of messages and returns the rendered chat prompt.
66
+ """
67
+ from huggingface_hub.hf_api import HfApi
68
+
69
+ minijinja = _import_minijinja()
70
+
71
+ # 1. fetch config from API
72
+ try:
73
+ config = HfApi(token=token).model_info(model_id).config
74
+ except RepositoryNotFoundError as e:
75
+ raise TemplateError(f"Cannot render chat template: model '{model_id}' not found.") from e
76
+ except HfHubHTTPError as e:
77
+ raise TemplateError(f"Error while trying to fetch chat template for model '{model_id}': {e}") from e
78
+
79
+ # 2. check config validity
80
+ if config is None:
81
+ raise TemplateError(f"Config not found for model '{model_id}'.")
82
+ tokenizer_config = config.get("tokenizer_config")
83
+ if tokenizer_config is None:
84
+ raise TemplateError(f"Tokenizer config not found for model '{model_id}'.")
85
+ if tokenizer_config.get("chat_template") is None:
86
+ raise TemplateError(f"Chat template not found in tokenizer_config for model '{model_id}'.")
87
+ chat_template = tokenizer_config["chat_template"]
88
+ if not isinstance(chat_template, str):
89
+ raise TemplateError(f"Chat template must be a string, not '{type(chat_template)}' (model: {model_id}).")
90
+
91
+ special_tokens: Dict[str, Optional[str]] = {}
92
+ for key, value in tokenizer_config.items():
93
+ if "token" in key:
94
+ if isinstance(value, str):
95
+ special_tokens[key] = value
96
+ elif isinstance(value, dict) and value.get("__type") == "AddedToken":
97
+ special_tokens[key] = value.get("content")
98
+
99
+ # 3. compile template and return
100
+ env = minijinja.Environment()
101
+ try:
102
+ env.add_template("chat_template", chat_template)
103
+ except minijinja.TemplateError as e:
104
+ raise TemplateError(f"Error while trying to compile chat template for model '{model_id}': {e}") from e
105
+ return lambda **kwargs: env.render_template("chat_template", **kwargs, **special_tokens)
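A hedged usage sketch for the helper above: it requires `minijinja` to be installed and network access to fetch the model's `tokenizer_config` from the Hub; the model id is only an example of a repo that ships a chat template.

```py
from huggingface_hub.inference._templating import render_chat_prompt

messages = [{"role": "user", "content": "Hello, how are you?"}]

# Fetches the chat template, compiles it with minijinja and renders the conversation.
prompt = render_chat_prompt(model_id="HuggingFaceH4/zephyr-7b-beta", messages=messages)
print(prompt)
```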
venv/lib/python3.10/site-packages/huggingface_hub/inference/_types.py ADDED
@@ -0,0 +1,52 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import List, TypedDict
17
+
18
+
19
+ # Legacy types
20
+ # Types are now generated from the JSON schema spec in @huggingface/tasks.
21
+ # See ./src/huggingface_hub/inference/_generated/types
22
+
23
+
24
+ class ConversationalOutputConversation(TypedDict):
25
+ """Dictionary containing the "conversation" part of a [`~InferenceClient.conversational`] task.
26
+
27
+ Args:
28
+ generated_responses (`List[str]`):
29
+ A list of the responses from the model.
30
+ past_user_inputs (`List[str]`):
31
+ A list of the inputs from the user. Must be the same length as `generated_responses`.
32
+ """
33
+
34
+ generated_responses: List[str]
35
+ past_user_inputs: List[str]
36
+
37
+
38
+ class ConversationalOutput(TypedDict):
39
+ """Dictionary containing the output of a [`~InferenceClient.conversational`] task.
40
+
41
+ Args:
42
+ generated_text (`str`):
43
+ The last response from the model.
44
+ conversation (`ConversationalOutputConversation`):
45
+ The past conversation.
46
+ warnings (`List[str]`):
47
+ A list of warnings associated with the process.
48
+ """
49
+
50
+ conversation: ConversationalOutputConversation
51
+ generated_text: str
52
+ warnings: List[str]
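Since these legacy types are plain `TypedDict`s, they only describe the shape of an ordinary dict for static type checkers. A sketch of a value that satisfies `ConversationalOutput`:

```py
from huggingface_hub.inference._types import ConversationalOutput

# At runtime this is a regular dict; the annotation is for type checkers only.
output: ConversationalOutput = {
    "generated_text": "I am doing well, thank you!",
    "conversation": {
        "generated_responses": ["I am doing well, thank you!"],
        "past_user_inputs": ["Hello, how are you?"],
    },
    "warnings": [],
}
```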
venv/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py ADDED
@@ -0,0 +1,122 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License
16
+
17
+ # ruff: noqa: F401
18
+
19
+ from . import tqdm as _tqdm # _tqdm is the module
20
+ from ._cache_assets import cached_assets_path
21
+ from ._cache_manager import (
22
+ CachedFileInfo,
23
+ CachedRepoInfo,
24
+ CachedRevisionInfo,
25
+ CacheNotFound,
26
+ CorruptedCacheException,
27
+ DeleteCacheStrategy,
28
+ HFCacheInfo,
29
+ scan_cache_dir,
30
+ )
31
+ from ._chunk_utils import chunk_iterable
32
+ from ._datetime import parse_datetime
33
+ from ._errors import (
34
+ BadRequestError,
35
+ DisabledRepoError,
36
+ EntryNotFoundError,
37
+ FileMetadataError,
38
+ GatedRepoError,
39
+ HfHubHTTPError,
40
+ LocalEntryNotFoundError,
41
+ RepositoryNotFoundError,
42
+ RevisionNotFoundError,
43
+ hf_raise_for_status,
44
+ )
45
+ from ._experimental import experimental
46
+ from ._fixes import SoftTemporaryDirectory, WeakFileLock, yaml_dump
47
+ from ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential
48
+ from ._headers import LocalTokenNotFoundError, build_hf_headers, get_token_to_send
49
+ from ._hf_folder import HfFolder
50
+ from ._http import (
51
+ OfflineModeIsEnabled,
52
+ configure_http_backend,
53
+ fix_hf_endpoint_in_url,
54
+ get_session,
55
+ http_backoff,
56
+ reset_sessions,
57
+ )
58
+ from ._pagination import paginate
59
+ from ._paths import IGNORE_GIT_FOLDER_PATTERNS, filter_repo_objects
60
+ from ._runtime import (
61
+ dump_environment_info,
62
+ get_aiohttp_version,
63
+ get_fastai_version,
64
+ get_fastcore_version,
65
+ get_gradio_version,
66
+ get_graphviz_version,
67
+ get_hf_hub_version,
68
+ get_hf_transfer_version,
69
+ get_jinja_version,
70
+ get_minijinja_version,
71
+ get_numpy_version,
72
+ get_pillow_version,
73
+ get_pydantic_version,
74
+ get_pydot_version,
75
+ get_python_version,
76
+ get_tensorboard_version,
77
+ get_tf_version,
78
+ get_torch_version,
79
+ is_aiohttp_available,
80
+ is_fastai_available,
81
+ is_fastcore_available,
82
+ is_google_colab,
83
+ is_gradio_available,
84
+ is_graphviz_available,
85
+ is_hf_transfer_available,
86
+ is_jinja_available,
87
+ is_minijinja_available,
88
+ is_notebook,
89
+ is_numpy_available,
90
+ is_package_available,
91
+ is_pillow_available,
92
+ is_pydantic_available,
93
+ is_pydot_available,
94
+ is_safetensors_available,
95
+ is_tensorboard_available,
96
+ is_tf_available,
97
+ is_torch_available,
98
+ )
99
+ from ._safetensors import (
100
+ NotASafetensorsRepoError,
101
+ SafetensorsFileMetadata,
102
+ SafetensorsParsingError,
103
+ SafetensorsRepoMetadata,
104
+ TensorInfo,
105
+ )
106
+ from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess
107
+ from ._telemetry import send_telemetry
108
+ from ._token import get_token
109
+ from ._typing import is_jsonable
110
+ from ._validators import (
111
+ HFValidationError,
112
+ smoothly_deprecate_use_auth_token,
113
+ validate_hf_hub_args,
114
+ validate_repo_id,
115
+ )
116
+ from .tqdm import (
117
+ are_progress_bars_disabled,
118
+ disable_progress_bars,
119
+ enable_progress_bars,
120
+ tqdm,
121
+ tqdm_stream_file,
122
+ )
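Everything re-exported above is importable directly from `huggingface_hub.utils`. A small sketch using two of the helpers; their exact signatures are assumed from the submodules referenced in this file:

```py
from huggingface_hub.utils import chunk_iterable, parse_datetime

# Split any iterable into fixed-size chunks (the last chunk may be shorter).
for chunk in chunk_iterable(range(10), chunk_size=4):
    print(chunk)

# Parse the datetime format returned by the Hub API.
print(parse_datetime("2022-08-19T07:19:38.123Z"))
```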
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc ADDED
Binary file (8.33 kB). View file
 
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc ADDED
Binary file (3.87 kB). View file
 
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc ADDED
Binary file (423 Bytes). View file
 
venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from pathlib import Path
16
+ from typing import Union
17
+
18
+ from ..constants import HF_ASSETS_CACHE
19
+
20
+
21
+ def cached_assets_path(
22
+ library_name: str,
23
+ namespace: str = "default",
24
+ subfolder: str = "default",
25
+ *,
26
+ assets_dir: Union[str, Path, None] = None,
27
+ ):
28
+ """Return a folder path to cache arbitrary files.
29
+
30
+ `huggingface_hub` provides a canonical folder path to store assets. This is the
31
+ recommended way to integrate caching in a downstream library as it will benefit from
32
+ the built-in tools to scan and delete the cache properly.
33
+
34
+ The distinction is made between files cached from the Hub and assets. Files from the
35
+ Hub are cached in a git-aware manner and entirely managed by `huggingface_hub`. See
36
+ [related documentation](https://huggingface.co/docs/huggingface_hub/how-to-cache).
37
+ All other files that a downstream library caches are considered to be "assets"
38
+ (files downloaded from external sources, extracted from a .tar archive, preprocessed
39
+ for training,...).
40
+
41
+ Once the folder path is generated, it is guaranteed to exist and to be a directory.
42
+ The path is based on 3 levels of depth: the library name, a namespace and a
43
+ subfolder. Those 3 levels grant flexibility while allowing `huggingface_hub` to
44
+ expect folders when scanning/deleting parts of the assets cache. Within a library,
45
+ it is expected that all namespaces share the same subset of subfolder names but this
46
+ is not a mandatory rule. The downstream library then has full control over which file
47
+ structure to adopt within its cache. Namespace and subfolder are optional (would
48
+ default to a `"default/"` subfolder) but library name is mandatory as we want every
49
+ downstream library to manage its own cache.
50
+
51
+ Expected tree:
52
+ ```text
53
+ assets/
54
+ └── datasets/
55
+ │ ├── SQuAD/
56
+ │ │ ├── downloaded/
57
+ │ │ ├── extracted/
58
+ │ │ └── processed/
59
+ │ ├── Helsinki-NLP--tatoeba_mt/
60
+ │ ├── downloaded/
61
+ │ ├── extracted/
62
+ │ └── processed/
63
+ └── transformers/
64
+ ├── default/
65
+ │ ├── something/
66
+ ├── bert-base-cased/
67
+ │ ├── default/
68
+ │ └── training/
69
+ hub/
70
+ └── models--julien-c--EsperBERTo-small/
71
+ ├── blobs/
72
+ │ ├── (...)
73
+ │ ├── (...)
74
+ ├── refs/
75
+ │ └── (...)
76
+ └── [ 128] snapshots/
77
+ ├── 2439f60ef33a0d46d85da5001d52aeda5b00ce9f/
78
+ │ ├── (...)
79
+ └── bbc77c8132af1cc5cf678da3f1ddf2de43606d48/
80
+ └── (...)
81
+ ```
82
+
83
+
84
+ Args:
85
+ library_name (`str`):
86
+ Name of the library that will manage the cache folder. Example: `"dataset"`.
87
+ namespace (`str`, *optional*, defaults to "default"):
88
+ Namespace to which the data belongs. Example: `"SQuAD"`.
89
+ subfolder (`str`, *optional*, defaults to "default"):
90
+ Subfolder in which the data will be stored. Example: `extracted`.
91
+ assets_dir (`str`, `Path`, *optional*):
92
+ Path to the folder where assets are cached. This must not be the same folder
93
+ where Hub files are cached. Defaults to `HF_HOME / "assets"` if not provided.
94
+ Can also be set with `HF_ASSETS_CACHE` environment variable.
95
+
96
+ Returns:
97
+ Path to the cache folder (`Path`).
98
+
99
+ Example:
100
+ ```py
101
+ >>> from huggingface_hub import cached_assets_path
102
+
103
+ >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="download")
104
+ PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/download')
105
+
106
+ >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="extracted")
107
+ PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/extracted')
108
+
109
+ >>> cached_assets_path(library_name="datasets", namespace="Helsinki-NLP/tatoeba_mt")
110
+ PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/Helsinki-NLP--tatoeba_mt/default')
111
+
112
+ >>> cached_assets_path(library_name="datasets", assets_dir="/tmp/tmp123456")
113
+ PosixPath('/tmp/tmp123456/datasets/default/default')
114
+ ```
115
+ """
116
+ # Resolve assets_dir
117
+ if assets_dir is None:
118
+ assets_dir = HF_ASSETS_CACHE
119
+ assets_dir = Path(assets_dir).expanduser().resolve()
120
+
121
+ # Avoid names that could create path issues
122
+ for part in (" ", "/", "\\"):
123
+ library_name = library_name.replace(part, "--")
124
+ namespace = namespace.replace(part, "--")
125
+ subfolder = subfolder.replace(part, "--")
126
+
127
+ # Path to subfolder is created
128
+ path = assets_dir / library_name / namespace / subfolder
129
+ try:
130
+ path.mkdir(exist_ok=True, parents=True)
131
+ except (FileExistsError, NotADirectoryError):
132
+ raise ValueError(f"Corrupted assets folder: cannot create directory because of an existing file ({path}).")
133
+
134
+ # Return
135
+ return path
venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py ADDED
@@ -0,0 +1,813 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to manage the HF cache directory."""
16
+
17
+ import os
18
+ import shutil
19
+ import time
20
+ from collections import defaultdict
21
+ from dataclasses import dataclass
22
+ from pathlib import Path
23
+ from typing import Dict, FrozenSet, List, Literal, Optional, Set, Union
24
+
25
+ from ..constants import HF_HUB_CACHE
26
+ from . import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ REPO_TYPE_T = Literal["model", "dataset", "space"]
32
+
33
+ # List of OS-created helper files that need to be ignored
34
+ FILES_TO_IGNORE = [".DS_Store"]
35
+
36
+
37
+ class CacheNotFound(Exception):
38
+ """Exception thrown when the Huggingface cache is not found."""
39
+
40
+ cache_dir: Union[str, Path]
41
+
42
+ def __init__(self, msg: str, cache_dir: Union[str, Path], *args, **kwargs):
43
+ super().__init__(msg, *args, **kwargs)
44
+ self.cache_dir = cache_dir
45
+
46
+
47
+ class CorruptedCacheException(Exception):
48
+ """Exception for any unexpected structure in the Huggingface cache-system."""
49
+
50
+
51
+ @dataclass(frozen=True)
52
+ class CachedFileInfo:
53
+ """Frozen data structure holding information about a single cached file.
54
+
55
+ Args:
56
+ file_name (`str`):
57
+ Name of the file. Example: `config.json`.
58
+ file_path (`Path`):
59
+ Path of the file in the `snapshots` directory. The file path is a symlink
60
+ referring to a blob in the `blobs` folder.
61
+ blob_path (`Path`):
62
+ Path of the blob file. This is equivalent to `file_path.resolve()`.
63
+ size_on_disk (`int`):
64
+ Size of the blob file in bytes.
65
+ blob_last_accessed (`float`):
66
+ Timestamp of the last time the blob file has been accessed (from any
67
+ revision).
68
+ blob_last_modified (`float`):
69
+ Timestamp of the last time the blob file has been modified/created.
70
+
71
+ <Tip warning={true}>
72
+
73
+ `blob_last_accessed` and `blob_last_modified` reliability can depend on the OS you
74
+ are using. See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
75
+ for more details.
76
+
77
+ </Tip>
78
+ """
79
+
80
+ file_name: str
81
+ file_path: Path
82
+ blob_path: Path
83
+ size_on_disk: int
84
+
85
+ blob_last_accessed: float
86
+ blob_last_modified: float
87
+
88
+ @property
89
+ def blob_last_accessed_str(self) -> str:
90
+ """
91
+ (property) Timestamp of the last time the blob file has been accessed (from any
92
+ revision), returned as a human-readable string.
93
+
94
+ Example: "2 weeks ago".
95
+ """
96
+ return _format_timesince(self.blob_last_accessed)
97
+
98
+ @property
99
+ def blob_last_modified_str(self) -> str:
100
+ """
101
+ (property) Timestamp of the last time the blob file has been modified, returned
102
+ as a human-readable string.
103
+
104
+ Example: "2 weeks ago".
105
+ """
106
+ return _format_timesince(self.blob_last_modified)
107
+
108
+ @property
109
+ def size_on_disk_str(self) -> str:
110
+ """
111
+ (property) Size of the blob file as a human-readable string.
112
+
113
+ Example: "42.2K".
114
+ """
115
+ return _format_size(self.size_on_disk)
116
+
117
+
118
+ @dataclass(frozen=True)
119
+ class CachedRevisionInfo:
120
+ """Frozen data structure holding information about a revision.
121
+
122
+ A revision corresponds to a folder in the `snapshots` folder and is populated with
123
+ the exact tree structure as the repo on the Hub but contains only symlinks. A
124
+ revision can be either referenced by 1 or more `refs` or be "detached" (no refs).
125
+
126
+ Args:
127
+ commit_hash (`str`):
128
+ Hash of the revision (unique).
129
+ Example: `"9338f7b671827df886678df2bdd7cc7b4f36dffd"`.
130
+ snapshot_path (`Path`):
131
+ Path to the revision directory in the `snapshots` folder. It contains the
132
+ exact tree structure as the repo on the Hub.
133
+ files: (`FrozenSet[CachedFileInfo]`):
134
+ Set of [`~CachedFileInfo`] describing all files contained in the snapshot.
135
+ refs (`FrozenSet[str]`):
136
+ Set of `refs` pointing to this revision. If the revision has no `refs`, it
137
+ is considered detached.
138
+ Example: `{"main", "2.4.0"}` or `{"refs/pr/1"}`.
139
+ size_on_disk (`int`):
140
+ Sum of the blob file sizes that are symlink-ed by the revision.
141
+ last_modified (`float`):
142
+ Timestamp of the last time the revision has been created/modified.
143
+
144
+ <Tip warning={true}>
145
+
146
+ `last_accessed` cannot be determined correctly on a single revision as blob files
147
+ are shared across revisions.
148
+
149
+ </Tip>
150
+
151
+ <Tip warning={true}>
152
+
153
+ `size_on_disk` is not necessarily the sum of all file sizes because of possible
154
+ duplicated files. Besides, only blobs are taken into account, not the (negligible)
155
+ size of folders and symlinks.
156
+
157
+ </Tip>
158
+ """
159
+
160
+ commit_hash: str
161
+ snapshot_path: Path
162
+ size_on_disk: int
163
+ files: FrozenSet[CachedFileInfo]
164
+ refs: FrozenSet[str]
165
+
166
+ last_modified: float
167
+
168
+ @property
169
+ def last_modified_str(self) -> str:
170
+ """
171
+ (property) Timestamp of the last time the revision has been modified, returned
172
+ as a human-readable string.
173
+
174
+ Example: "2 weeks ago".
175
+ """
176
+ return _format_timesince(self.last_modified)
177
+
178
+ @property
179
+ def size_on_disk_str(self) -> str:
180
+ """
181
+ (property) Sum of the blob file sizes as a human-readable string.
182
+
183
+ Example: "42.2K".
184
+ """
185
+ return _format_size(self.size_on_disk)
186
+
187
+ @property
188
+ def nb_files(self) -> int:
189
+ """
190
+ (property) Total number of files in the revision.
191
+ """
192
+ return len(self.files)
193
+
194
+
195
+ @dataclass(frozen=True)
196
+ class CachedRepoInfo:
197
+ """Frozen data structure holding information about a cached repository.
198
+
199
+ Args:
200
+ repo_id (`str`):
201
+ Repo id of the repo on the Hub. Example: `"google/fleurs"`.
202
+ repo_type (`Literal["dataset", "model", "space"]`):
203
+ Type of the cached repo.
204
+ repo_path (`Path`):
205
+ Local path to the cached repo.
206
+ size_on_disk (`int`):
207
+ Sum of the blob file sizes in the cached repo.
208
+ nb_files (`int`):
209
+ Total number of blob files in the cached repo.
210
+ revisions (`FrozenSet[CachedRevisionInfo]`):
211
+ Set of [`~CachedRevisionInfo`] describing all revisions cached in the repo.
212
+ last_accessed (`float`):
213
+ Timestamp of the last time a blob file of the repo has been accessed.
214
+ last_modified (`float`):
215
+ Timestamp of the last time a blob file of the repo has been modified/created.
216
+
217
+ <Tip warning={true}>
218
+
219
+ `size_on_disk` is not necessarily the sum of all revisions sizes because of
220
+ duplicated files. Besides, only blobs are taken into account, not the (negligible)
221
+ size of folders and symlinks.
222
+
223
+ </Tip>
224
+
225
+ <Tip warning={true}>
226
+
227
+ `last_accessed` and `last_modified` reliability can depend on the OS you are using.
228
+ See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
229
+ for more details.
230
+
231
+ </Tip>
232
+ """
233
+
234
+ repo_id: str
235
+ repo_type: REPO_TYPE_T
236
+ repo_path: Path
237
+ size_on_disk: int
238
+ nb_files: int
239
+ revisions: FrozenSet[CachedRevisionInfo]
240
+
241
+ last_accessed: float
242
+ last_modified: float
243
+
244
+ @property
245
+ def last_accessed_str(self) -> str:
246
+ """
247
+ (property) Last time a blob file of the repo has been accessed, returned as a
248
+ human-readable string.
249
+
250
+ Example: "2 weeks ago".
251
+ """
252
+ return _format_timesince(self.last_accessed)
253
+
254
+ @property
255
+ def last_modified_str(self) -> str:
256
+ """
257
+ (property) Last time a blob file of the repo has been modified, returned as a
258
+ human-readable string.
259
+
260
+ Example: "2 weeks ago".
261
+ """
262
+ return _format_timesince(self.last_modified)
263
+
264
+ @property
265
+ def size_on_disk_str(self) -> str:
266
+ """
267
+ (property) Sum of the blob file sizes as a human-readable string.
268
+
269
+ Example: "42.2K".
270
+ """
271
+ return _format_size(self.size_on_disk)
272
+
273
+ @property
274
+ def refs(self) -> Dict[str, CachedRevisionInfo]:
275
+ """
276
+ (property) Mapping between `refs` and revision data structures.
277
+ """
278
+ return {ref: revision for revision in self.revisions for ref in revision.refs}
279
+
280
+
281
+ @dataclass(frozen=True)
282
+ class DeleteCacheStrategy:
283
+ """Frozen data structure holding the strategy to delete cached revisions.
284
+
285
+ This object is not meant to be instantiated programmatically but to be returned by
286
+ [`~utils.HFCacheInfo.delete_revisions`]. See documentation for usage example.
287
+
288
+ Args:
289
+ expected_freed_size (`float`):
290
+ Expected freed size once strategy is executed.
291
+ blobs (`FrozenSet[Path]`):
292
+ Set of blob file paths to be deleted.
293
+ refs (`FrozenSet[Path]`):
294
+ Set of reference file paths to be deleted.
295
+ repos (`FrozenSet[Path]`):
296
+ Set of entire repo paths to be deleted.
297
+ snapshots (`FrozenSet[Path]`):
298
+ Set of snapshots to be deleted (directory of symlinks).
299
+ """
300
+
301
+ expected_freed_size: int
302
+ blobs: FrozenSet[Path]
303
+ refs: FrozenSet[Path]
304
+ repos: FrozenSet[Path]
305
+ snapshots: FrozenSet[Path]
306
+
307
+ @property
308
+ def expected_freed_size_str(self) -> str:
309
+ """
310
+ (property) Expected size that will be freed as a human-readable string.
311
+
312
+ Example: "42.2K".
313
+ """
314
+ return _format_size(self.expected_freed_size)
315
+
316
+ def execute(self) -> None:
317
+ """Execute the defined strategy.
318
+
319
+ <Tip warning={true}>
320
+
321
+ If this method is interrupted, the cache might get corrupted. Deletion order is
322
+ implemented so that references and symlinks are deleted before the actual blob
323
+ files.
324
+
325
+ </Tip>
326
+
327
+ <Tip warning={true}>
328
+
329
+ This method is irreversible. If executed, cached files are erased and must be
330
+ downloaded again.
331
+
332
+ </Tip>
333
+ """
334
+ # Deletion order matters. Blobs are deleted last so that the user can't end
335
+ # up in a state where a `ref` refers to a missing snapshot or a snapshot
336
+ # symlink refers to a deleted blob.
337
+
338
+ # Delete entire repos
339
+ for path in self.repos:
340
+ _try_delete_path(path, path_type="repo")
341
+
342
+ # Delete snapshot directories
343
+ for path in self.snapshots:
344
+ _try_delete_path(path, path_type="snapshot")
345
+
346
+ # Delete refs files
347
+ for path in self.refs:
348
+ _try_delete_path(path, path_type="ref")
349
+
350
+ # Delete blob files
351
+ for path in self.blobs:
352
+ _try_delete_path(path, path_type="blob")
353
+
354
+ logger.info(f"Cache deletion done. Saved {self.expected_freed_size_str}.")
355
+
356
+
357
+ @dataclass(frozen=True)
358
+ class HFCacheInfo:
359
+ """Frozen data structure holding information about the entire cache-system.
360
+
361
+ This data structure is returned by [`scan_cache_dir`] and is immutable.
362
+
363
+ Args:
364
+ size_on_disk (`int`):
365
+ Sum of all valid repo sizes in the cache-system.
366
+ repos (`FrozenSet[CachedRepoInfo]`):
367
+ Set of [`~CachedRepoInfo`] describing all valid cached repos found on the
368
+ cache-system while scanning.
369
+ warnings (`List[CorruptedCacheException]`):
370
+ List of [`~CorruptedCacheException`] that occurred while scanning the cache.
371
+ Those exceptions are captured so that the scan can continue. Corrupted repos
372
+ are skipped from the scan.
373
+
374
+ <Tip warning={true}>
375
+
376
+ Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However if
377
+ some cached repos are corrupted, their sizes are not taken into account.
378
+
379
+ </Tip>
380
+ """
381
+
382
+ size_on_disk: int
383
+ repos: FrozenSet[CachedRepoInfo]
384
+ warnings: List[CorruptedCacheException]
385
+
386
+ @property
387
+ def size_on_disk_str(self) -> str:
388
+ """
389
+ (property) Sum of all valid repo sizes in the cache-system as a human-readable
390
+ string.
391
+
392
+ Example: "42.2K".
393
+ """
394
+ return _format_size(self.size_on_disk)
395
+
396
+ def delete_revisions(self, *revisions: str) -> DeleteCacheStrategy:
397
+ """Prepare the strategy to delete one or more revisions cached locally.
398
+
399
+ Input revisions can be any revision hash. If a revision hash is not found in the
400
+ local cache, a warning is thrown but no error is raised. Revisions can be from
401
+ different cached repos since hashes are unique across repos.
402
+
403
+ Examples:
404
+ ```py
405
+ >>> from huggingface_hub import scan_cache_dir
406
+ >>> cache_info = scan_cache_dir()
407
+ >>> delete_strategy = cache_info.delete_revisions(
408
+ ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa"
409
+ ... )
410
+ >>> print(f"Will free {delete_strategy.expected_freed_size_str}.")
411
+ Will free 7.9K.
412
+ >>> delete_strategy.execute()
413
+ Cache deletion done. Saved 7.9K.
414
+ ```
415
+
416
+ ```py
417
+ >>> from huggingface_hub import scan_cache_dir
418
+ >>> scan_cache_dir().delete_revisions(
419
+ ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa",
420
+ ... "e2983b237dccf3ab4937c97fa717319a9ca1a96d",
421
+ ... "6c0e6080953db56375760c0471a8c5f2929baf11",
422
+ ... ).execute()
423
+ Cache deletion done. Saved 8.6G.
424
+ ```
425
+
426
+ <Tip warning={true}>
427
+
428
+ `delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to
429
+ be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but
430
+ allows having a dry run before actually executing the deletion.
431
+
432
+ </Tip>
433
+ """
434
+ hashes_to_delete: Set[str] = set(revisions)
435
+
436
+ repos_with_revisions: Dict[CachedRepoInfo, Set[CachedRevisionInfo]] = defaultdict(set)
437
+
438
+ for repo in self.repos:
439
+ for revision in repo.revisions:
440
+ if revision.commit_hash in hashes_to_delete:
441
+ repos_with_revisions[repo].add(revision)
442
+ hashes_to_delete.remove(revision.commit_hash)
443
+
444
+ if len(hashes_to_delete) > 0:
445
+ logger.warning(f"Revision(s) not found - cannot delete them: {', '.join(hashes_to_delete)}")
446
+
447
+ delete_strategy_blobs: Set[Path] = set()
448
+ delete_strategy_refs: Set[Path] = set()
449
+ delete_strategy_repos: Set[Path] = set()
450
+ delete_strategy_snapshots: Set[Path] = set()
451
+ delete_strategy_expected_freed_size = 0
452
+
453
+ for affected_repo, revisions_to_delete in repos_with_revisions.items():
454
+ other_revisions = affected_repo.revisions - revisions_to_delete
455
+
456
+ # If no other revisions, it means all revisions are deleted
457
+ # -> delete the entire cached repo
458
+ if len(other_revisions) == 0:
459
+ delete_strategy_repos.add(affected_repo.repo_path)
460
+ delete_strategy_expected_freed_size += affected_repo.size_on_disk
461
+ continue
462
+
463
+ # Some revisions of the repo will be deleted but not all. We need to filter
464
+ # which blob files will not be linked anymore.
465
+ for revision_to_delete in revisions_to_delete:
466
+ # Snapshot dir
467
+ delete_strategy_snapshots.add(revision_to_delete.snapshot_path)
468
+
469
+ # Refs dir
470
+ for ref in revision_to_delete.refs:
471
+ delete_strategy_refs.add(affected_repo.repo_path / "refs" / ref)
472
+
473
+ # Blobs dir
474
+ for file in revision_to_delete.files:
475
+ if file.blob_path not in delete_strategy_blobs:
476
+ is_file_alone = True
477
+ for revision in other_revisions:
478
+ for rev_file in revision.files:
479
+ if file.blob_path == rev_file.blob_path:
480
+ is_file_alone = False
481
+ break
482
+ if not is_file_alone:
483
+ break
484
+
485
+ # Blob file not referenced by remaining revisions -> delete
486
+ if is_file_alone:
487
+ delete_strategy_blobs.add(file.blob_path)
488
+ delete_strategy_expected_freed_size += file.size_on_disk
489
+
490
+ # Return the strategy instead of executing it.
491
+ return DeleteCacheStrategy(
492
+ blobs=frozenset(delete_strategy_blobs),
493
+ refs=frozenset(delete_strategy_refs),
494
+ repos=frozenset(delete_strategy_repos),
495
+ snapshots=frozenset(delete_strategy_snapshots),
496
+ expected_freed_size=delete_strategy_expected_freed_size,
497
+ )
498
+
499
+
500
+ def scan_cache_dir(cache_dir: Optional[Union[str, Path]] = None) -> HFCacheInfo:
501
+ """Scan the entire HF cache-system and return a [`~HFCacheInfo`] structure.
502
+
503
+ Use `scan_cache_dir` in order to programmatically scan your cache-system. The cache
504
+ will be scanned repo by repo. If a repo is corrupted, a [`~CorruptedCacheException`]
505
+ will be thrown internally but captured and returned in the [`~HFCacheInfo`]
506
+ structure. Only valid repos get a proper report.
507
+
508
+ ```py
509
+ >>> from huggingface_hub import scan_cache_dir
510
+
511
+ >>> hf_cache_info = scan_cache_dir()
512
+ HFCacheInfo(
513
+ size_on_disk=3398085269,
514
+ repos=frozenset({
515
+ CachedRepoInfo(
516
+ repo_id='t5-small',
517
+ repo_type='model',
518
+ repo_path=PosixPath(...),
519
+ size_on_disk=970726914,
520
+ nb_files=11,
521
+ revisions=frozenset({
522
+ CachedRevisionInfo(
523
+ commit_hash='d78aea13fa7ecd06c29e3e46195d6341255065d5',
524
+ size_on_disk=970726339,
525
+ snapshot_path=PosixPath(...),
526
+ files=frozenset({
527
+ CachedFileInfo(
528
+ file_name='config.json',
529
+ size_on_disk=1197
530
+ file_path=PosixPath(...),
531
+ blob_path=PosixPath(...),
532
+ ),
533
+ CachedFileInfo(...),
534
+ ...
535
+ }),
536
+ ),
537
+ CachedRevisionInfo(...),
538
+ ...
539
+ }),
540
+ ),
541
+ CachedRepoInfo(...),
542
+ ...
543
+ }),
544
+ warnings=[
545
+ CorruptedCacheException("Snapshots dir doesn't exist in cached repo: ..."),
546
+ CorruptedCacheException(...),
547
+ ...
548
+ ],
549
+ )
550
+ ```
551
+
552
+ You can also print a detailed report directly from the `huggingface-cli` using:
553
+ ```text
554
+ > huggingface-cli scan-cache
555
+ REPO ID REPO TYPE SIZE ON DISK NB FILES REFS LOCAL PATH
556
+ --------------------------- --------- ------------ -------- ------------------- -------------------------------------------------------------------------
557
+ glue dataset 116.3K 15 1.17.0, main, 2.4.0 /Users/lucain/.cache/huggingface/hub/datasets--glue
558
+ google/fleurs dataset 64.9M 6 main, refs/pr/1 /Users/lucain/.cache/huggingface/hub/datasets--google--fleurs
559
+ Jean-Baptiste/camembert-ner model 441.0M 7 main /Users/lucain/.cache/huggingface/hub/models--Jean-Baptiste--camembert-ner
560
+ bert-base-cased model 1.9G 13 main /Users/lucain/.cache/huggingface/hub/models--bert-base-cased
561
+ t5-base model 10.1K 3 main /Users/lucain/.cache/huggingface/hub/models--t5-base
562
+ t5-small model 970.7M 11 refs/pr/1, main /Users/lucain/.cache/huggingface/hub/models--t5-small
563
+
564
+ Done in 0.0s. Scanned 6 repo(s) for a total of 3.4G.
565
+ Got 1 warning(s) while scanning. Use -vvv to print details.
566
+ ```
567
+
568
+ Args:
569
+ cache_dir (`str` or `Path`, `optional`):
570
+ Cache directory to scan. Defaults to the default HF cache directory.
571
+
572
+ <Tip warning={true}>
573
+
574
+ Raises:
575
+
576
+ `CacheNotFound`
577
+ If the cache directory does not exist.
578
+
579
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
580
+ If the cache directory is a file, instead of a directory.
581
+
582
+ </Tip>
583
+
584
+ Returns: a [`~HFCacheInfo`] object.
585
+ """
586
+ if cache_dir is None:
587
+ cache_dir = HF_HUB_CACHE
588
+
589
+ cache_dir = Path(cache_dir).expanduser().resolve()
590
+ if not cache_dir.exists():
591
+ raise CacheNotFound(
592
+ f"Cache directory not found: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable.",
593
+ cache_dir=cache_dir,
594
+ )
595
+
596
+ if cache_dir.is_file():
597
+ raise ValueError(
598
+ f"Scan cache expects a directory but found a file: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable."
599
+ )
600
+
601
+ repos: Set[CachedRepoInfo] = set()
602
+ warnings: List[CorruptedCacheException] = []
603
+ for repo_path in cache_dir.iterdir():
604
+ if repo_path.name == ".locks": # skip './.locks/' folder
605
+ continue
606
+ try:
607
+ repos.add(_scan_cached_repo(repo_path))
608
+ except CorruptedCacheException as e:
609
+ warnings.append(e)
610
+
611
+ return HFCacheInfo(
612
+ repos=frozenset(repos),
613
+ size_on_disk=sum(repo.size_on_disk for repo in repos),
614
+ warnings=warnings,
615
+ )
616
+
617
+
618
+ def _scan_cached_repo(repo_path: Path) -> CachedRepoInfo:
619
+ """Scan a single cache repo and return information about it.
620
+
621
+ Any unexpected behavior will raise a [`~CorruptedCacheException`].
622
+ """
623
+ if not repo_path.is_dir():
624
+ raise CorruptedCacheException(f"Repo path is not a directory: {repo_path}")
625
+
626
+ if "--" not in repo_path.name:
627
+ raise CorruptedCacheException(f"Repo path is not a valid HuggingFace cache directory: {repo_path}")
628
+
629
+ repo_type, repo_id = repo_path.name.split("--", maxsplit=1)
630
+ repo_type = repo_type[:-1] # "models" -> "model"
631
+ repo_id = repo_id.replace("--", "/") # "google--fleurs" -> "google/fleurs"
632
+
633
+ if repo_type not in {"dataset", "model", "space"}:
634
+ raise CorruptedCacheException(
635
+ f"Repo type must be `dataset`, `model` or `space`, found `{repo_type}` ({repo_path})."
636
+ )
637
+
638
+ blob_stats: Dict[Path, os.stat_result] = {} # Key is blob_path, value is blob stats
639
+
640
+ snapshots_path = repo_path / "snapshots"
641
+ refs_path = repo_path / "refs"
642
+
643
+ if not snapshots_path.exists() or not snapshots_path.is_dir():
644
+ raise CorruptedCacheException(f"Snapshots dir doesn't exist in cached repo: {snapshots_path}")
645
+
646
+ # Scan over `refs` directory
647
+
648
+ # key is revision hash, value is set of refs
649
+ refs_by_hash: Dict[str, Set[str]] = defaultdict(set)
650
+ if refs_path.exists():
651
+ # Example of `refs` directory
652
+ # ── refs
653
+ # ├── main
654
+ # └── refs
655
+ # └── pr
656
+ # └── 1
657
+ if refs_path.is_file():
658
+ raise CorruptedCacheException(f"Refs directory cannot be a file: {refs_path}")
659
+
660
+ for ref_path in refs_path.glob("**/*"):
661
+ # glob("**/*") iterates over all files and directories -> skip directories
662
+ if ref_path.is_dir():
663
+ continue
664
+
665
+ ref_name = str(ref_path.relative_to(refs_path))
666
+ with ref_path.open() as f:
667
+ commit_hash = f.read()
668
+
669
+ refs_by_hash[commit_hash].add(ref_name)
670
+
671
+ # Scan snapshots directory
672
+ cached_revisions: Set[CachedRevisionInfo] = set()
673
+ for revision_path in snapshots_path.iterdir():
674
+ # Ignore OS-created helper files
675
+ if revision_path.name in FILES_TO_IGNORE:
676
+ continue
677
+ if revision_path.is_file():
678
+ raise CorruptedCacheException(f"Snapshots folder corrupted. Found a file: {revision_path}")
679
+
680
+ cached_files = set()
681
+ for file_path in revision_path.glob("**/*"):
682
+ # glob("**/*") iterates over all files and directories -> skip directories
683
+ if file_path.is_dir():
684
+ continue
685
+
686
+ blob_path = Path(file_path).resolve()
687
+ if not blob_path.exists():
688
+ raise CorruptedCacheException(f"Blob missing (broken symlink): {blob_path}")
689
+
690
+ if blob_path not in blob_stats:
691
+ blob_stats[blob_path] = blob_path.stat()
692
+
693
+ cached_files.add(
694
+ CachedFileInfo(
695
+ file_name=file_path.name,
696
+ file_path=file_path,
697
+ size_on_disk=blob_stats[blob_path].st_size,
698
+ blob_path=blob_path,
699
+ blob_last_accessed=blob_stats[blob_path].st_atime,
700
+ blob_last_modified=blob_stats[blob_path].st_mtime,
701
+ )
702
+ )
703
+
704
+ # Last modified is either the last modified blob file or the revision folder
705
+ # itself if it is empty
706
+ if len(cached_files) > 0:
707
+ revision_last_modified = max(blob_stats[file.blob_path].st_mtime for file in cached_files)
708
+ else:
709
+ revision_last_modified = revision_path.stat().st_mtime
710
+
711
+ cached_revisions.add(
712
+ CachedRevisionInfo(
713
+ commit_hash=revision_path.name,
714
+ files=frozenset(cached_files),
715
+ refs=frozenset(refs_by_hash.pop(revision_path.name, set())),
716
+ size_on_disk=sum(
717
+ blob_stats[blob_path].st_size for blob_path in set(file.blob_path for file in cached_files)
718
+ ),
719
+ snapshot_path=revision_path,
720
+ last_modified=revision_last_modified,
721
+ )
722
+ )
723
+
724
+ # Check that all refs refer to an existing revision
725
+ if len(refs_by_hash) > 0:
726
+ raise CorruptedCacheException(
727
+ f"Reference(s) refer to missing commit hashes: {dict(refs_by_hash)} ({repo_path})."
728
+ )
729
+
730
+ # Last modified is either the last modified blob file or the repo folder itself if
731
+ # no blob files have been found. Same for last accessed.
732
+ if len(blob_stats) > 0:
733
+ repo_last_accessed = max(stat.st_atime for stat in blob_stats.values())
734
+ repo_last_modified = max(stat.st_mtime for stat in blob_stats.values())
735
+ else:
736
+ repo_stats = repo_path.stat()
737
+ repo_last_accessed = repo_stats.st_atime
738
+ repo_last_modified = repo_stats.st_mtime
739
+
740
+ # Build and return frozen structure
741
+ return CachedRepoInfo(
742
+ nb_files=len(blob_stats),
743
+ repo_id=repo_id,
744
+ repo_path=repo_path,
745
+ repo_type=repo_type, # type: ignore
746
+ revisions=frozenset(cached_revisions),
747
+ size_on_disk=sum(stat.st_size for stat in blob_stats.values()),
748
+ last_accessed=repo_last_accessed,
749
+ last_modified=repo_last_modified,
750
+ )
751
+
752
+
753
+ def _format_size(num: int) -> str:
754
+ """Format size in bytes into a human-readable string.
755
+
756
+ Taken from https://stackoverflow.com/a/1094933
757
+ """
758
+ num_f = float(num)
759
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
760
+ if abs(num_f) < 1000.0:
761
+ return f"{num_f:3.1f}{unit}"
762
+ num_f /= 1000.0
763
+ return f"{num_f:.1f}Y"
764
+
765
+
766
+ _TIMESINCE_CHUNKS = (
767
+ # Label, divider, max value
768
+ ("second", 1, 60),
769
+ ("minute", 60, 60),
770
+ ("hour", 60 * 60, 24),
771
+ ("day", 60 * 60 * 24, 6),
772
+ ("week", 60 * 60 * 24 * 7, 6),
773
+ ("month", 60 * 60 * 24 * 30, 11),
774
+ ("year", 60 * 60 * 24 * 365, None),
775
+ )
776
+
777
+
778
+ def _format_timesince(ts: float) -> str:
779
+ """Format timestamp in seconds into a human-readable string, relative to now.
780
+
781
+ Vaguely inspired by Django's `timesince` formatter.
782
+ """
783
+ delta = time.time() - ts
784
+ if delta < 20:
785
+ return "a few seconds ago"
786
+ for label, divider, max_value in _TIMESINCE_CHUNKS: # noqa: B007
787
+ value = round(delta / divider)
788
+ if max_value is not None and value <= max_value:
789
+ break
790
+ return f"{value} {label}{'s' if value > 1 else ''} ago"
791
+
792
+
793
+ def _try_delete_path(path: Path, path_type: str) -> None:
794
+ """Try to delete a local file or folder.
795
+
796
+ If the path does not exist, the error is logged as a warning and then ignored.
797
+
798
+ Args:
799
+ path (`Path`):
800
+ Path to delete. Can be a file or a folder.
801
+ path_type (`str`):
802
+ What path are we deleting? Only used for logging purposes. Example: "snapshot".
803
+ """
804
+ logger.info(f"Delete {path_type}: {path}")
805
+ try:
806
+ if path.is_file():
807
+ os.remove(path)
808
+ else:
809
+ shutil.rmtree(path)
810
+ except FileNotFoundError:
811
+ logger.warning(f"Couldn't delete {path_type}: file not found ({path})", exc_info=True)
812
+ except PermissionError:
813
+ logger.warning(f"Couldn't delete {path_type}: permission denied ({path})", exc_info=True)
venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py ADDED
@@ -0,0 +1,136 @@
1
+ import warnings
2
+ from functools import wraps
3
+ from inspect import Parameter, signature
4
+ from typing import Iterable, Optional
5
+
6
+
7
+ def _deprecate_positional_args(*, version: str):
8
+ """Decorator for methods that issues warnings for positional arguments.
9
+ Using the keyword-only argument syntax in pep 3102, arguments after the
10
+ * will issue a warning when passed as a positional argument.
11
+
12
+ Args:
13
+ version (`str`):
14
+ The version when positional arguments will result in error.
15
+ """
16
+
17
+ def _inner_deprecate_positional_args(f):
18
+ sig = signature(f)
19
+ kwonly_args = []
20
+ all_args = []
21
+ for name, param in sig.parameters.items():
22
+ if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
23
+ all_args.append(name)
24
+ elif param.kind == Parameter.KEYWORD_ONLY:
25
+ kwonly_args.append(name)
26
+
27
+ @wraps(f)
28
+ def inner_f(*args, **kwargs):
29
+ extra_args = len(args) - len(all_args)
30
+ if extra_args <= 0:
31
+ return f(*args, **kwargs)
32
+ # extra_args > 0
33
+ args_msg = [
34
+ f"{name}='{arg}'" if isinstance(arg, str) else f"{name}={arg}"
35
+ for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
36
+ ]
37
+ args_msg = ", ".join(args_msg)
38
+ warnings.warn(
39
+ f"Deprecated positional argument(s) used in '{f.__name__}': pass"
40
+ f" {args_msg} as keyword args. From version {version} passing these"
41
+ " as positional arguments will result in an error,",
42
+ FutureWarning,
43
+ )
44
+ kwargs.update(zip(sig.parameters, args))
45
+ return f(**kwargs)
46
+
47
+ return inner_f
48
+
49
+ return _inner_deprecate_positional_args
50
+
51
+
52
+ def _deprecate_arguments(
53
+ *,
54
+ version: str,
55
+ deprecated_args: Iterable[str],
56
+ custom_message: Optional[str] = None,
57
+ ):
58
+ """Decorator to issue warnings when using deprecated arguments.
59
+
60
+ TODO: could be useful to be able to set a custom error message.
61
+
62
+ Args:
63
+ version (`str`):
64
+ The version when deprecated arguments will result in error.
65
+ deprecated_args (`List[str]`):
66
+ List of the arguments to be deprecated.
67
+ custom_message (`str`, *optional*):
68
+ Warning message that is raised. If not passed, a default warning message
69
+ will be created.
70
+ """
71
+
72
+ def _inner_deprecate_positional_args(f):
73
+ sig = signature(f)
74
+
75
+ @wraps(f)
76
+ def inner_f(*args, **kwargs):
77
+ # Check for used deprecated arguments
78
+ used_deprecated_args = []
79
+ for _, parameter in zip(args, sig.parameters.values()):
80
+ if parameter.name in deprecated_args:
81
+ used_deprecated_args.append(parameter.name)
82
+ for kwarg_name, kwarg_value in kwargs.items():
83
+ if (
84
+ # If argument is deprecated but still used
85
+ kwarg_name in deprecated_args
86
+ # And then the value is not the default value
87
+ and kwarg_value != sig.parameters[kwarg_name].default
88
+ ):
89
+ used_deprecated_args.append(kwarg_name)
90
+
91
+ # Warn and proceed
92
+ if len(used_deprecated_args) > 0:
93
+ message = (
94
+ f"Deprecated argument(s) used in '{f.__name__}':"
95
+ f" {', '.join(used_deprecated_args)}. Will not be supported from"
96
+ f" version '{version}'."
97
+ )
98
+ if custom_message is not None:
99
+ message += "\n\n" + custom_message
100
+ warnings.warn(message, FutureWarning)
101
+ return f(*args, **kwargs)
102
+
103
+ return inner_f
104
+
105
+ return _inner_deprecate_positional_args
106
+
107
+
108
+ def _deprecate_method(*, version: str, message: Optional[str] = None):
109
+ """Decorator to issue warnings when using a deprecated method.
110
+
111
+ Args:
112
+ version (`str`):
113
+ The version when calling the deprecated method will result in an error.
114
+ message (`str`, *optional*):
115
+ Warning message that is raised. If not passed, a default warning message
116
+ will be created.
117
+ """
118
+
119
+ def _inner_deprecate_method(f):
120
+ name = f.__name__
121
+ if name == "__init__":
122
+ name = f.__qualname__.split(".")[0] # class name instead of method name
123
+
124
+ @wraps(f)
125
+ def inner_f(*args, **kwargs):
126
+ warning_message = (
127
+ f"'{name}' (from '{f.__module__}') is deprecated and will be removed from version '{version}'."
128
+ )
129
+ if message is not None:
130
+ warning_message += " " + message
131
+ warnings.warn(warning_message, FutureWarning)
132
+ return f(*args, **kwargs)
133
+
134
+ return inner_f
135
+
136
+ return _inner_deprecate_method
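To show how these decorators compose, here is a hedged usage sketch; the decorated functions, argument names, and versions are made up for the example.

```py
from huggingface_hub.utils._deprecation import _deprecate_arguments, _deprecate_method

# Hypothetical function: `old_param` keeps working but emits a FutureWarning when set.
@_deprecate_arguments(version="1.0", deprecated_args=["old_param"], custom_message="Use `new_param` instead.")
def do_something(new_param=None, old_param=None):
    return new_param or old_param

# Hypothetical function flagged for removal as a whole.
@_deprecate_method(version="1.0", message="Use `do_something` instead.")
def do_something_legacy():
    return do_something(new_param="ok")

do_something(old_param="x")  # warns about `old_param`
do_something_legacy()        # warns about the deprecated function itself
```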
venv/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py ADDED
@@ -0,0 +1,397 @@
1
+ import re
2
+ from typing import Optional
3
+
4
+ from requests import HTTPError, Response
5
+
6
+ from ._fixes import JSONDecodeError
7
+
8
+
9
+ REPO_API_REGEX = re.compile(
10
+ r"""
11
+ # staging or production endpoint
12
+ ^https://[^/]+
13
+ (
14
+ # on /api/repo_type/repo_id
15
+ /api/(models|datasets|spaces)/(.+)
16
+ |
17
+ # or /repo_id/resolve/revision/...
18
+ /(.+)/resolve/(.+)
19
+ )
20
+ """,
21
+ flags=re.VERBOSE,
22
+ )
23
+
24
+
25
+ class FileMetadataError(OSError):
26
+ """Error triggered when the metadata of a file on the Hub cannot be retrieved (missing ETag or commit_hash).
27
+
28
+ Inherits from `OSError` for backward compatibility.
29
+ """
30
+
31
+
32
+ class HfHubHTTPError(HTTPError):
33
+ """
34
+ HTTPError to inherit from for any custom HTTP Error raised in HF Hub.
35
+
36
+ Any HTTPError is converted at least into a `HfHubHTTPError`. If some information is
37
+ sent back by the server, it will be added to the error message.
38
+
39
+ Added details:
40
+ - Request id from "X-Request-Id" header if exists.
41
+ - Server error message from the header "X-Error-Message".
42
+ - Server error message if one can be found in the response body.
43
+
44
+ Example:
45
+ ```py
46
+ import requests
47
+ from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError
48
+
49
+ response = get_session().post(...)
50
+ try:
51
+ hf_raise_for_status(response)
52
+ except HfHubHTTPError as e:
53
+ print(str(e)) # formatted message
54
+ e.request_id, e.server_message # details returned by server
55
+
56
+ # Complete the error message with additional information once it's raised
57
+ e.append_to_message("\n`create_commit` expects the repository to exist.")
58
+ raise
59
+ ```
60
+ """
61
+
62
+ request_id: Optional[str] = None
63
+ server_message: Optional[str] = None
64
+
65
+ def __init__(self, message: str, response: Optional[Response] = None):
66
+ # Parse server information if any.
67
+ if response is not None:
68
+ self.request_id = response.headers.get("X-Request-Id")
69
+ try:
70
+ server_data = response.json()
71
+ except JSONDecodeError:
72
+ server_data = {}
73
+
74
+ # Retrieve server error message from multiple sources
75
+ server_message_from_headers = response.headers.get("X-Error-Message")
76
+ server_message_from_body = server_data.get("error")
77
+ server_multiple_messages_from_body = "\n".join(
78
+ error["message"] for error in server_data.get("errors", []) if "message" in error
79
+ )
80
+
81
+ # Concatenate error messages
82
+ _server_message = ""
83
+ if server_message_from_headers is not None: # from headers
84
+ _server_message += server_message_from_headers + "\n"
85
+ if server_message_from_body is not None: # from body "error"
86
+ if isinstance(server_message_from_body, list):
87
+ server_message_from_body = "\n".join(server_message_from_body)
88
+ if server_message_from_body not in _server_message:
89
+ _server_message += server_message_from_body + "\n"
90
+ if server_multiple_messages_from_body is not None: # from body "errors"
91
+ if server_multiple_messages_from_body not in _server_message:
92
+ _server_message += server_multiple_messages_from_body + "\n"
93
+ _server_message = _server_message.strip()
94
+
95
+ # Set message to `HfHubHTTPError` (if any)
96
+ if _server_message != "":
97
+ self.server_message = _server_message
98
+
99
+ super().__init__(
100
+ _format_error_message(
101
+ message,
102
+ request_id=self.request_id,
103
+ server_message=self.server_message,
104
+ ),
105
+ response=response, # type: ignore
106
+ request=response.request if response is not None else None, # type: ignore
107
+ )
108
+
109
+ def append_to_message(self, additional_message: str) -> None:
110
+ """Append additional information to the `HfHubHTTPError` initial message."""
111
+ self.args = (self.args[0] + additional_message,) + self.args[1:]
112
+
113
+
114
+ class RepositoryNotFoundError(HfHubHTTPError):
115
+ """
116
+ Raised when trying to access a hf.co URL with an invalid repository name, or
117
+ with a private repo name the user does not have access to.
118
+
119
+ Example:
120
+
121
+ ```py
122
+ >>> from huggingface_hub import model_info
123
+ >>> model_info("<non_existent_repository>")
124
+ (...)
125
+ huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: PvMw_VjBMjVdMz53WKIzP)
126
+
127
+ Repository Not Found for url: https://huggingface.co/api/models/%3Cnon_existent_repository%3E.
128
+ Please make sure you specified the correct `repo_id` and `repo_type`.
129
+ If the repo is private, make sure you are authenticated.
130
+ Invalid username or password.
131
+ ```
132
+ """
133
+
134
+
135
+ class GatedRepoError(RepositoryNotFoundError):
136
+ """
137
+ Raised when trying to access a gated repository for which the user is not on the
138
+ authorized list.
139
+
140
+ Note: derives from `RepositoryNotFoundError` to ensure backward compatibility.
141
+
142
+ Example:
143
+
144
+ ```py
145
+ >>> from huggingface_hub import model_info
146
+ >>> model_info("<gated_repository>")
147
+ (...)
148
+ huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: ViT1Bf7O_026LGSQuVqfa)
149
+
150
+ Cannot access gated repo for url https://huggingface.co/api/models/ardent-figment/gated-model.
151
+ Access to model ardent-figment/gated-model is restricted and you are not in the authorized list.
152
+ Visit https://huggingface.co/ardent-figment/gated-model to ask for access.
153
+ ```
154
+ """
155
+
156
+
157
+ class DisabledRepoError(HfHubHTTPError):
158
+ """
159
+ Raised when trying to access a repository that has been disabled by its author.
160
+
161
+ Example:
162
+
163
+ ```py
164
+ >>> from huggingface_hub import dataset_info
165
+ >>> dataset_info("laion/laion-art")
166
+ (...)
167
+ huggingface_hub.utils._errors.DisabledRepoError: 403 Client Error. (Request ID: Root=1-659fc3fa-3031673e0f92c71a2260dbe2;bc6f4dfb-b30a-4862-af0a-5cfe827610d8)
168
+
169
+ Cannot access repository for url https://huggingface.co/api/datasets/laion/laion-art.
170
+ Access to this resource is disabled.
171
+ ```
172
+ """
173
+
174
+
175
+ class RevisionNotFoundError(HfHubHTTPError):
176
+ """
177
+ Raised when trying to access a hf.co URL with a valid repository but an invalid
178
+ revision.
179
+
180
+ Example:
181
+
182
+ ```py
183
+ >>> from huggingface_hub import hf_hub_download
184
+ >>> hf_hub_download('bert-base-cased', 'config.json', revision='<non-existent-revision>')
185
+ (...)
186
+ huggingface_hub.utils._errors.RevisionNotFoundError: 404 Client Error. (Request ID: Mwhe_c3Kt650GcdKEFomX)
187
+
188
+ Revision Not Found for url: https://huggingface.co/bert-base-cased/resolve/%3Cnon-existent-revision%3E/config.json.
189
+ ```
190
+ """
191
+
192
+
193
+ class EntryNotFoundError(HfHubHTTPError):
194
+ """
195
+ Raised when trying to access a hf.co URL with a valid repository and revision
196
+ but an invalid filename.
197
+
198
+ Example:
199
+
200
+ ```py
201
+ >>> from huggingface_hub import hf_hub_download
202
+ >>> hf_hub_download('bert-base-cased', '<non-existent-file>')
203
+ (...)
204
+ huggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. (Request ID: 53pNl6M0MxsnG5Sw8JA6x)
205
+
206
+ Entry Not Found for url: https://huggingface.co/bert-base-cased/resolve/main/%3Cnon-existent-file%3E.
207
+ ```
208
+ """
209
+
210
+
211
+ class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError):
212
+ """
213
+ Raised when trying to access a file or snapshot that is not on the disk when network is
214
+ disabled or unavailable (connection issue). The entry may exist on the Hub.
215
+
216
+ Note: `ValueError` type is to ensure backward compatibility.
217
+ Note: `LocalEntryNotFoundError` derives from `HTTPError` because of `EntryNotFoundError`
218
+ even when it is not a network issue.
219
+
220
+ Example:
221
+
222
+ ```py
223
+ >>> from huggingface_hub import hf_hub_download
224
+ >>> hf_hub_download('bert-base-cased', '<non-cached-file>', local_files_only=True)
225
+ (...)
226
+ huggingface_hub.utils._errors.LocalEntryNotFoundError: Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False.
227
+ ```
228
+ """
229
+
230
+ def __init__(self, message: str):
231
+ super().__init__(message, response=None)
232
+
233
+
234
+ class BadRequestError(HfHubHTTPError, ValueError):
235
+ """
236
+ Raised by `hf_raise_for_status` when the server returns an HTTP 400 error.
237
+
238
+ Example:
239
+
240
+ ```py
241
+ >>> resp = requests.post("hf.co/api/check", ...)
242
+ >>> hf_raise_for_status(resp, endpoint_name="check")
243
+ huggingface_hub.utils._errors.BadRequestError: Bad request for check endpoint: {details} (Request ID: XXX)
244
+ ```
245
+ """
246
+
247
+
248
+ def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None) -> None:
249
+ """
250
+ Internal version of `response.raise_for_status()` that will refine a
251
+ potential HTTPError. Raised exception will be an instance of `HfHubHTTPError`.
252
+
253
+ This helper is meant to be the single method used to call raise_for_status when making a call
254
+ to the Hugging Face Hub.
255
+
256
+ Example:
257
+ ```py
258
+ import requests
259
+ from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError
260
+
261
+ response = get_session().post(...)
262
+ try:
263
+ hf_raise_for_status(response)
264
+ except HfHubHTTPError as e:
265
+ print(str(e)) # formatted message
266
+ e.request_id, e.server_message # details returned by server
267
+
268
+ # Complete the error message with additional information once it's raised
269
+ e.append_to_message("\n`create_commit` expects the repository to exist.")
270
+ raise
271
+ ```
272
+
273
+ Args:
274
+ response (`Response`):
275
+ Response from the server.
276
+ endpoint_name (`str`, *optional*):
277
+ Name of the endpoint that has been called. If provided, the error message
278
+ will be more complete.
279
+
280
+ <Tip warning={true}>
281
+
282
+ Raises when the request has failed:
283
+
284
+ - [`~utils.RepositoryNotFoundError`]
285
+ If the repository to download from cannot be found. This may be because it
286
+ doesn't exist, because `repo_type` is not set correctly, or because the repo
287
+ is `private` and you do not have access.
288
+ - [`~utils.GatedRepoError`]
289
+ If the repository exists but is gated and the user is not on the authorized
290
+ list.
291
+ - [`~utils.RevisionNotFoundError`]
292
+ If the repository exists but the revision couldn't be found.
293
+ - [`~utils.EntryNotFoundError`]
294
+ If the repository exists but the entry (e.g. the requested file) couldn't be
295
+ found.
296
+ - [`~utils.BadRequestError`]
297
+ If request failed with a HTTP 400 BadRequest error.
298
+ - [`~utils.HfHubHTTPError`]
299
+ If request failed for a reason not listed above.
300
+
301
+ </Tip>
302
+ """
303
+ try:
304
+ response.raise_for_status()
305
+ except HTTPError as e:
306
+ error_code = response.headers.get("X-Error-Code")
307
+ error_message = response.headers.get("X-Error-Message")
308
+
309
+ if error_code == "RevisionNotFound":
310
+ message = f"{response.status_code} Client Error." + "\n\n" + f"Revision Not Found for url: {response.url}."
311
+ raise RevisionNotFoundError(message, response) from e
312
+
313
+ elif error_code == "EntryNotFound":
314
+ message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}."
315
+ raise EntryNotFoundError(message, response) from e
316
+
317
+ elif error_code == "GatedRepo":
318
+ message = (
319
+ f"{response.status_code} Client Error." + "\n\n" + f"Cannot access gated repo for url {response.url}."
320
+ )
321
+ raise GatedRepoError(message, response) from e
322
+
323
+ elif error_message == "Access to this resource is disabled.":
324
+ message = (
325
+ f"{response.status_code} Client Error."
326
+ + "\n\n"
327
+ + f"Cannot access repository for url {response.url}."
328
+ + "\n"
329
+ + "Access to this resource is disabled."
330
+ )
331
+ raise DisabledRepoError(message, response) from e
332
+
333
+ elif error_code == "RepoNotFound" or (
334
+ response.status_code == 401
335
+ and response.request is not None
336
+ and response.request.url is not None
337
+ and REPO_API_REGEX.search(response.request.url) is not None
338
+ ):
339
+ # 401 is misleading as it is returned for:
340
+ # - private and gated repos if user is not authenticated
341
+ # - missing repos
342
+ # => for now, we process them as `RepoNotFound` anyway.
343
+ # See https://gist.github.com/Wauplin/46c27ad266b15998ce56a6603796f0b9
344
+ message = (
345
+ f"{response.status_code} Client Error."
346
+ + "\n\n"
347
+ + f"Repository Not Found for url: {response.url}."
348
+ + "\nPlease make sure you specified the correct `repo_id` and"
349
+ " `repo_type`.\nIf you are trying to access a private or gated repo,"
350
+ " make sure you are authenticated."
351
+ )
352
+ raise RepositoryNotFoundError(message, response) from e
353
+
354
+ elif response.status_code == 400:
355
+ message = (
356
+ f"\n\nBad request for {endpoint_name} endpoint:" if endpoint_name is not None else "\n\nBad request:"
357
+ )
358
+ raise BadRequestError(message, response=response) from e
359
+
360
+ elif response.status_code == 403:
361
+ message = (
362
+ f"\n\n{response.status_code} Forbidden: {error_message}."
363
+ + f"\nCannot access content at: {response.url}."
364
+ + "\nIf you are trying to create or update content,"
365
+ + "make sure you have a token with the `write` role."
366
+ )
367
+ raise HfHubHTTPError(message, response=response) from e
368
+
369
+ # Convert `HTTPError` into a `HfHubHTTPError` to display request information
370
+ # as well (request id and/or server error message)
371
+ raise HfHubHTTPError(str(e), response=response) from e
372
+
373
+
374
+ def _format_error_message(message: str, request_id: Optional[str], server_message: Optional[str]) -> str:
375
+ """
376
+ Format the `HfHubHTTPError` error message based on initial message and information
377
+ returned by the server.
378
+
379
+ Used when initializing `HfHubHTTPError`.
380
+ """
381
+ # Add message from response body
382
+ if server_message is not None and len(server_message) > 0 and server_message.lower() not in message.lower():
383
+ if "\n\n" in message:
384
+ message += "\n" + server_message
385
+ else:
386
+ message += "\n\n" + server_message
387
+
388
+ # Add Request ID
389
+ if request_id is not None and str(request_id).lower() not in message.lower():
390
+ request_id_message = f" (Request ID: {request_id})"
391
+ if "\n" in message:
392
+ newline_index = message.index("\n")
393
+ message = message[:newline_index] + request_id_message + message[newline_index:]
394
+ else:
395
+ message += request_id_message
396
+
397
+ return message
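In practice these error classes are caught around download or API calls; below is a hedged sketch, with a made-up repository name, showing how the hierarchy lets callers distinguish failure modes.

```py
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
)

try:
    # "some-org/some-model" is a placeholder repository, not a real one.
    path = hf_hub_download("some-org/some-model", "config.json")
except RepositoryNotFoundError:
    print("Repo does not exist, or it is private/gated and you are not authorized.")
except RevisionNotFoundError:
    print("Repo exists but the requested revision does not.")
except EntryNotFoundError:
    print("Repo and revision exist but the requested file does not.")
```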
venv/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py ADDED
@@ -0,0 +1,66 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to flag a feature as "experimental" in Huggingface Hub."""
16
+
17
+ import warnings
18
+ from functools import wraps
19
+ from typing import Callable
20
+
21
+ from .. import constants
22
+
23
+
24
+ def experimental(fn: Callable) -> Callable:
25
+ """Decorator to flag a feature as experimental.
26
+
27
+ An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
28
+ Warnings can be disabled by setting the environment variable `HF_HUB_DISABLE_EXPERIMENTAL_WARNING` to `1`.
29
+
30
+ Args:
31
+ fn (`Callable`):
32
+ The function to flag as experimental.
33
+
34
+ Returns:
35
+ `Callable`: The decorated function.
36
+
37
+ Example:
38
+
39
+ ```python
40
+ >>> from huggingface_hub.utils import experimental
41
+
42
+ >>> @experimental
43
+ ... def my_function():
44
+ ... print("Hello world!")
45
+
46
+ >>> my_function()
47
+ UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. You can disable
48
+ this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable.
49
+ Hello world!
50
+ ```
51
+ """
52
+ # For classes, put the "experimental" around the "__new__" method => __new__ will be removed in warning message
53
+ name = fn.__qualname__[: -len(".__new__")] if fn.__qualname__.endswith(".__new__") else fn.__qualname__
54
+
55
+ @wraps(fn)
56
+ def _inner_fn(*args, **kwargs):
57
+ if not constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING:
58
+ warnings.warn(
59
+ f"'{name}' is experimental and might be subject to breaking changes in the future."
60
+ " You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment"
61
+ " variable.",
62
+ UserWarning,
63
+ )
64
+ return fn(*args, **kwargs)
65
+
66
+ return _inner_fn
venv/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py ADDED
@@ -0,0 +1,93 @@
1
+ # JSONDecodeError was introduced in requests=2.27 released in 2022.
2
+ # This allows us to support older requests for users
3
+ # More information: https://github.com/psf/requests/pull/5856
4
+ try:
5
+ from requests import JSONDecodeError # type: ignore # noqa: F401
6
+ except ImportError:
7
+ try:
8
+ from simplejson import JSONDecodeError # type: ignore # noqa: F401
9
+ except ImportError:
10
+ from json import JSONDecodeError # type: ignore # noqa: F401
11
+ import contextlib
12
+ import os
13
+ import shutil
14
+ import stat
15
+ import tempfile
16
+ from functools import partial
17
+ from pathlib import Path
18
+ from typing import Callable, Generator, Optional, Union
19
+
20
+ import yaml
21
+ from filelock import BaseFileLock, FileLock
22
+
23
+
24
+ # Wrap `yaml.dump` to set `allow_unicode=True` by default.
25
+ #
26
+ # Example:
27
+ # ```py
28
+ # >>> yaml.dump({"emoji": "👀", "some unicode": "日本か"})
29
+ # 'emoji: "\\U0001F440"\nsome unicode: "\\u65E5\\u672C\\u304B"\n'
30
+ #
31
+ # >>> yaml_dump({"emoji": "👀", "some unicode": "日本か"})
32
+ # 'emoji: "👀"\nsome unicode: "日本か"\n'
33
+ # ```
34
+ yaml_dump: Callable[..., str] = partial(yaml.dump, stream=None, allow_unicode=True) # type: ignore
35
+
36
+
37
+ @contextlib.contextmanager
38
+ def SoftTemporaryDirectory(
39
+ suffix: Optional[str] = None,
40
+ prefix: Optional[str] = None,
41
+ dir: Optional[Union[Path, str]] = None,
42
+ **kwargs,
43
+ ) -> Generator[Path, None, None]:
44
+ """
45
+ Context manager to create a temporary directory and safely delete it.
46
+
47
+ If tmp directory cannot be deleted normally, we set the WRITE permission and retry.
48
+ If cleanup still fails, we give up but don't raise an exception. This is equivalent
49
+ to `tempfile.TemporaryDirectory(..., ignore_cleanup_errors=True)` introduced in
50
+ Python 3.10.
51
+
52
+ See https://www.scivision.dev/python-tempfile-permission-error-windows/.
53
+ """
54
+ tmpdir = tempfile.TemporaryDirectory(prefix=prefix, suffix=suffix, dir=dir, **kwargs)
55
+ yield Path(tmpdir.name).resolve()
56
+
57
+ try:
58
+ # First attempt with normal cleanup
59
+ shutil.rmtree(tmpdir.name)
60
+ except Exception:
61
+ # If failed, try to set write permission and retry
62
+ try:
63
+ shutil.rmtree(tmpdir.name, onerror=_set_write_permission_and_retry)
64
+ except Exception:
65
+ pass
66
+
67
+ # And finally, cleanup the tmpdir.
68
+ # If it fails again, give up but do not throw error
69
+ try:
70
+ tmpdir.cleanup()
71
+ except Exception:
72
+ pass
73
+
74
+
75
+ def _set_write_permission_and_retry(func, path, excinfo):
76
+ os.chmod(path, stat.S_IWRITE)
77
+ func(path)
78
+
79
+
80
+ @contextlib.contextmanager
81
+ def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, None]:
82
+ lock = FileLock(lock_file)
83
+ lock.acquire()
84
+
85
+ yield lock
86
+
87
+ try:
88
+ return lock.release()
89
+ except OSError:
90
+ try:
91
+ Path(lock_file).unlink()
92
+ except OSError:
93
+ pass
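A short usage sketch of the two helpers above; the file name and lock path are placeholders.

```py
from pathlib import Path

from huggingface_hub.utils._fixes import SoftTemporaryDirectory, WeakFileLock

# Temporary directory whose cleanup tolerates Windows permission errors.
with SoftTemporaryDirectory(prefix="hf-example-") as tmpdir:
    (tmpdir / "notes.txt").write_text("hello")

# File lock that deletes the lock file if releasing it fails (e.g. on some network filesystems).
with WeakFileLock("/tmp/hf_example.lock"):
    pass  # critical section goes here
```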
venv/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py ADDED
@@ -0,0 +1,121 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to manage Git credentials."""
16
+
17
+ import re
18
+ import subprocess
19
+ from typing import List, Optional
20
+
21
+ from ..constants import ENDPOINT
22
+ from ._subprocess import run_interactive_subprocess, run_subprocess
23
+
24
+
25
+ GIT_CREDENTIAL_REGEX = re.compile(
26
+ r"""
27
+ ^\s* # start of line
28
+ credential\.helper # credential.helper value
29
+ \s*=\s* # separator
30
+ (\w+) # the helper name (group 1)
31
+ (\s|$) # whitespace or end of line
32
+ """,
33
+ flags=re.MULTILINE | re.IGNORECASE | re.VERBOSE,
34
+ )
35
+
36
+
37
+ def list_credential_helpers(folder: Optional[str] = None) -> List[str]:
38
+ """Return the list of git credential helpers configured.
39
+
40
+ See https://git-scm.com/docs/gitcredentials.
41
+
42
+ Helpers are listed by parsing the output of "`git config --list`".
43
+ Calls "`git credential approve`" internally. See https://git-scm.com/docs/git-credential.
44
+
45
+ Args:
46
+ folder (`str`, *optional*):
47
+ The folder in which to check the configured helpers.
48
+ """
49
+ try:
50
+ output = run_subprocess("git config --list", folder=folder).stdout
51
+ parsed = _parse_credential_output(output)
52
+ return parsed
53
+ except subprocess.CalledProcessError as exc:
54
+ raise EnvironmentError(exc.stderr)
55
+
56
+
57
+ def set_git_credential(token: str, username: str = "hf_user", folder: Optional[str] = None) -> None:
58
+ """Save a username/token pair in git credential for HF Hub registry.
59
+
60
+ Credentials are saved in all configured helpers (store, cache, macOS keychain,...).
61
+ Calls "`git credential approve`" internally. See https://git-scm.com/docs/git-credential.
62
+
63
+ Args:
64
+ username (`str`, defaults to `"hf_user"`):
65
+ A git username. Defaults to `"hf_user"`, the default user used in the Hub.
66
+ token (`str`, defaults to `"hf_user"`):
67
+ A git password. In practice, the User Access Token for the Hub.
68
+ See https://huggingface.co/settings/tokens.
69
+ folder (`str`, *optional*):
70
+ The folder in which to check the configured helpers.
71
+ """
72
+ with run_interactive_subprocess("git credential approve", folder=folder) as (
73
+ stdin,
74
+ _,
75
+ ):
76
+ stdin.write(f"url={ENDPOINT}\nusername={username.lower()}\npassword={token}\n\n")
77
+ stdin.flush()
78
+
79
+
80
+ def unset_git_credential(username: str = "hf_user", folder: Optional[str] = None) -> None:
81
+ """Erase credentials from git credential for HF Hub registry.
82
+
83
+ Credentials are erased from the configured helpers (store, cache, macOS
84
+ keychain,...), if any. If `username` is not provided, any credential configured for
85
+ HF Hub endpoint is erased.
86
+ Calls "`git credential erase`" internally. See https://git-scm.com/docs/git-credential.
87
+
88
+ Args:
89
+ username (`str`, defaults to `"hf_user"`):
90
+ A git username. Defaults to `"hf_user"`, the default user used in the Hub.
91
+ folder (`str`, *optional*):
92
+ The folder in which to check the configured helpers.
93
+ """
94
+ with run_interactive_subprocess("git credential reject", folder=folder) as (
95
+ stdin,
96
+ _,
97
+ ):
98
+ standard_input = f"url={ENDPOINT}\n"
99
+ if username is not None:
100
+ standard_input += f"username={username.lower()}\n"
101
+ standard_input += "\n"
102
+
103
+ stdin.write(standard_input)
104
+ stdin.flush()
105
+
106
+
107
+ def _parse_credential_output(output: str) -> List[str]:
108
+ """Parse the output of `git credential fill` to extract the password.
109
+
110
+ Args:
111
+ output (`str`):
112
+ The output of `git config --list`.
113
+ """
114
+ # NOTE: If the user has set a helper for a custom URL, it will not be caught here.
115
+ # Example: `credential.https://huggingface.co.helper=store`
116
+ # See: https://github.com/huggingface/huggingface_hub/pull/1138#discussion_r1013324508
117
+ return sorted( # Sort for nice printing
118
+ set( # Might have some duplicates
119
+ match[0] for match in GIT_CREDENTIAL_REGEX.findall(output)
120
+ )
121
+ )
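A hedged sketch of how these helpers fit together; the token below is a placeholder, and the snippet actually writes to your configured git credential helpers, so treat it as illustrative only.

```py
from huggingface_hub.utils._git_credential import (
    list_credential_helpers,
    set_git_credential,
    unset_git_credential,
)

if list_credential_helpers():  # requires at least one configured credential.helper
    set_git_credential(token="hf_xxx")  # stored via `git credential approve`
    unset_git_credential()              # removed again via `git credential reject`
```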
venv/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py ADDED
@@ -0,0 +1,241 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to handle headers to send in calls to Huggingface Hub."""
16
+
17
+ from typing import Dict, Optional, Union
18
+
19
+ from .. import constants
20
+ from ._runtime import (
21
+ get_fastai_version,
22
+ get_fastcore_version,
23
+ get_hf_hub_version,
24
+ get_python_version,
25
+ get_tf_version,
26
+ get_torch_version,
27
+ is_fastai_available,
28
+ is_fastcore_available,
29
+ is_tf_available,
30
+ is_torch_available,
31
+ )
32
+ from ._token import get_token
33
+ from ._validators import validate_hf_hub_args
34
+
35
+
36
+ class LocalTokenNotFoundError(EnvironmentError):
37
+ """Raised if local token is required but not found."""
38
+
39
+
40
+ @validate_hf_hub_args
41
+ def build_hf_headers(
42
+ *,
43
+ token: Optional[Union[bool, str]] = None,
44
+ is_write_action: bool = False,
45
+ library_name: Optional[str] = None,
46
+ library_version: Optional[str] = None,
47
+ user_agent: Union[Dict, str, None] = None,
48
+ headers: Optional[Dict[str, str]] = None,
49
+ ) -> Dict[str, str]:
50
+ """
51
+ Build headers dictionary to send in a HF Hub call.
52
+
53
+ By default, authorization token is always provided either from argument (explicit
54
+ use) or retrieved from the cache (implicit use). To explicitly avoid sending the
55
+ token to the Hub, set `token=False` or set the `HF_HUB_DISABLE_IMPLICIT_TOKEN`
56
+ environment variable.
57
+
58
+ In case of an API call that requires write access, an error is thrown if token is
59
+ `None` or token is an organization token (starting with `"api_org***"`).
60
+
61
+ In addition to the auth header, a user-agent is added to provide information about
62
+ the installed packages (versions of python, huggingface_hub, torch, tensorflow,
63
+ fastai and fastcore).
64
+
65
+ Args:
66
+ token (`str`, `bool`, *optional*):
67
+ The token to be sent in authorization header for the Hub call:
68
+ - if a string, it is used as the Hugging Face token
69
+ - if `True`, the token is read from the machine (cache or env variable)
70
+ - if `False`, authorization header is not set
71
+ - if `None`, the token is read from the machine only except if
72
+ `HF_HUB_DISABLE_IMPLICIT_TOKEN` env variable is set.
73
+ is_write_action (`bool`, defaults to `False`):
74
+ Set to True if the API call requires a write access. If `True`, the token
75
+ will be validated (cannot be `None`, cannot start by `"api_org***"`).
76
+ library_name (`str`, *optional*):
77
+ The name of the library that is making the HTTP request. Will be added to
78
+ the user-agent header.
79
+ library_version (`str`, *optional*):
80
+ The version of the library that is making the HTTP request. Will be added
81
+ to the user-agent header.
82
+ user_agent (`str`, `dict`, *optional*):
83
+ The user agent info in the form of a dictionary or a single string. It will
84
+ be completed with information about the installed packages.
85
+ headers (`dict`, *optional*):
86
+ Additional headers to include in the request. Those headers take precedence
87
+ over the ones generated by this function.
88
+
89
+ Returns:
90
+ A `Dict` of headers to pass in your API call.
91
+
92
+ Example:
93
+ ```py
94
+ >>> build_hf_headers(token="hf_***") # explicit token
95
+ {"authorization": "Bearer hf_***", "user-agent": ""}
96
+
97
+ >>> build_hf_headers(token=True) # explicitly use cached token
98
+ {"authorization": "Bearer hf_***",...}
99
+
100
+ >>> build_hf_headers(token=False) # explicitly don't use cached token
101
+ {"user-agent": ...}
102
+
103
+ >>> build_hf_headers() # implicit use of the cached token
104
+ {"authorization": "Bearer hf_***",...}
105
+
106
+ # HF_HUB_DISABLE_IMPLICIT_TOKEN=True # to set as env variable
107
+ >>> build_hf_headers() # token is not sent
108
+ {"user-agent": ...}
109
+
110
+ >>> build_hf_headers(token="api_org_***", is_write_action=True)
111
+ ValueError: You must use your personal account token for write-access methods.
112
+
113
+ >>> build_hf_headers(library_name="transformers", library_version="1.2.3")
114
+ {"authorization": ..., "user-agent": "transformers/1.2.3; hf_hub/0.10.2; python/3.10.4; tensorflow/1.55"}
115
+ ```
116
+
117
+ Raises:
118
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
119
+ If organization token is passed and "write" access is required.
120
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
121
+ If "write" access is required but token is not passed and not saved locally.
122
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
123
+ If `token=True` but token is not saved locally.
124
+ """
125
+ # Get auth token to send
126
+ token_to_send = get_token_to_send(token)
127
+ _validate_token_to_send(token_to_send, is_write_action=is_write_action)
128
+
129
+ # Combine headers
130
+ hf_headers = {
131
+ "user-agent": _http_user_agent(
132
+ library_name=library_name,
133
+ library_version=library_version,
134
+ user_agent=user_agent,
135
+ )
136
+ }
137
+ if token_to_send is not None:
138
+ hf_headers["authorization"] = f"Bearer {token_to_send}"
139
+ if headers is not None:
140
+ hf_headers.update(headers)
141
+ return hf_headers
142
+
143
+
144
+ def get_token_to_send(token: Optional[Union[bool, str]]) -> Optional[str]:
145
+ """Select the token to send from either `token` or the cache."""
146
+ # Case token is explicitly provided
147
+ if isinstance(token, str):
148
+ return token
149
+
150
+ # Case token is explicitly forbidden
151
+ if token is False:
152
+ return None
153
+
154
+ # Token is not provided: we get it from local cache
155
+ cached_token = get_token()
156
+
157
+ # Case token is explicitly required
158
+ if token is True:
159
+ if cached_token is None:
160
+ raise LocalTokenNotFoundError(
161
+ "Token is required (`token=True`), but no token found. You"
162
+ " need to provide a token or be logged in to Hugging Face with"
163
+ " `huggingface-cli login` or `huggingface_hub.login`. See"
164
+ " https://huggingface.co/settings/tokens."
165
+ )
166
+ return cached_token
167
+
168
+ # Case implicit use of the token is forbidden by env variable
169
+ if constants.HF_HUB_DISABLE_IMPLICIT_TOKEN:
170
+ return None
171
+
172
+ # Otherwise: we use the cached token as the user has not explicitly forbidden it
173
+ return cached_token
174
+
175
+
176
+ def _validate_token_to_send(token: Optional[str], is_write_action: bool) -> None:
177
+ if is_write_action:
178
+ if token is None:
179
+ raise ValueError(
180
+ "Token is required (write-access action) but no token found. You need"
181
+ " to provide a token or be logged in to Hugging Face with"
182
+ " `huggingface-cli login` or `huggingface_hub.login`. See"
183
+ " https://huggingface.co/settings/tokens."
184
+ )
185
+ if token.startswith("api_org"):
186
+ raise ValueError(
187
+ "You must use your personal account token for write-access methods. To"
188
+ " generate a write-access token, go to"
189
+ " https://huggingface.co/settings/tokens"
190
+ )
191
+
192
+
193
+ def _http_user_agent(
194
+ *,
195
+ library_name: Optional[str] = None,
196
+ library_version: Optional[str] = None,
197
+ user_agent: Union[Dict, str, None] = None,
198
+ ) -> str:
199
+ """Format a user-agent string containing information about the installed packages.
200
+
201
+ Args:
202
+ library_name (`str`, *optional*):
203
+ The name of the library that is making the HTTP request.
204
+ library_version (`str`, *optional*):
205
+ The version of the library that is making the HTTP request.
206
+ user_agent (`str`, `dict`, *optional*):
207
+ The user agent info in the form of a dictionary or a single string.
208
+
209
+ Returns:
210
+ The formatted user-agent string.
211
+ """
212
+ if library_name is not None:
213
+ ua = f"{library_name}/{library_version}"
214
+ else:
215
+ ua = "unknown/None"
216
+ ua += f"; hf_hub/{get_hf_hub_version()}"
217
+ ua += f"; python/{get_python_version()}"
218
+
219
+ if not constants.HF_HUB_DISABLE_TELEMETRY:
220
+ if is_torch_available():
221
+ ua += f"; torch/{get_torch_version()}"
222
+ if is_tf_available():
223
+ ua += f"; tensorflow/{get_tf_version()}"
224
+ if is_fastai_available():
225
+ ua += f"; fastai/{get_fastai_version()}"
226
+ if is_fastcore_available():
227
+ ua += f"; fastcore/{get_fastcore_version()}"
228
+
229
+ if isinstance(user_agent, dict):
230
+ ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
231
+ elif isinstance(user_agent, str):
232
+ ua += "; " + user_agent
233
+
234
+ return _deduplicate_user_agent(ua)
235
+
236
+
237
+ def _deduplicate_user_agent(user_agent: str) -> str:
238
+ """Deduplicate redundant information in the generated user-agent."""
239
+ # Split around ";" > Strip whitespaces > Store as dict keys (ensure unicity) > format back as string
240
+ # Order is implicitly preserved by dictionary structure (see https://stackoverflow.com/a/53657523).
241
+ return "; ".join({key.strip(): None for key in user_agent.split(";")}.keys())
venv/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py ADDED
@@ -0,0 +1,96 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contain helper class to retrieve/store token from/to local cache."""
16
+
17
+ import warnings
18
+ from pathlib import Path
19
+ from typing import Optional
20
+
21
+ from .. import constants
22
+ from ._token import get_token
23
+
24
+
25
+ class HfFolder:
26
+ path_token = Path(constants.HF_TOKEN_PATH)
27
+ # Private attribute. Will be removed in v0.15
28
+ _old_path_token = Path(constants._OLD_HF_TOKEN_PATH)
29
+
30
+ # TODO: deprecate when adapted in transformers/datasets/gradio
31
+ # @_deprecate_method(version="1.0", message="Use `huggingface_hub.login` instead.")
32
+ @classmethod
33
+ def save_token(cls, token: str) -> None:
34
+ """
35
+ Save token, creating folder as needed.
36
+
37
+ Token is saved in the huggingface home folder. You can configure it by setting
38
+ the `HF_HOME` environment variable.
39
+
40
+ Args:
41
+ token (`str`):
42
+ The token to save to the [`HfFolder`]
43
+ """
44
+ cls.path_token.parent.mkdir(parents=True, exist_ok=True)
45
+ cls.path_token.write_text(token)
46
+
47
+ # TODO: deprecate when adapted in transformers/datasets/gradio
48
+ # @_deprecate_method(version="1.0", message="Use `huggingface_hub.get_token` instead.")
49
+ @classmethod
50
+ def get_token(cls) -> Optional[str]:
51
+ """
52
+ Get token or None if not existent.
53
+
54
+ This method is deprecated in favor of [`huggingface_hub.get_token`] but is kept for backward compatibility.
55
+ Its behavior is the same as [`huggingface_hub.get_token`].
56
+
57
+ Returns:
58
+ `str` or `None`: The token, `None` if it doesn't exist.
59
+ """
60
+ # 0. Check if token exist in old path but not new location
61
+ try:
62
+ cls._copy_to_new_path_and_warn()
63
+ except Exception: # if not possible (e.g. PermissionError), do not raise
64
+ pass
65
+
66
+ return get_token()
67
+
68
+ # TODO: deprecate when adapted in transformers/datasets/gradio
69
+ # @_deprecate_method(version="1.0", message="Use `huggingface_hub.logout` instead.")
70
+ @classmethod
71
+ def delete_token(cls) -> None:
72
+ """
73
+ Deletes the token from storage. Does not fail if token does not exist.
74
+ """
75
+ try:
76
+ cls.path_token.unlink()
77
+ except FileNotFoundError:
78
+ pass
79
+
80
+ try:
81
+ cls._old_path_token.unlink()
82
+ except FileNotFoundError:
83
+ pass
84
+
85
+ @classmethod
86
+ def _copy_to_new_path_and_warn(cls):
87
+ if cls._old_path_token.exists() and not cls.path_token.exists():
88
+ cls.save_token(cls._old_path_token.read_text())
89
+ warnings.warn(
90
+ f"A token has been found in `{cls._old_path_token}`. This is the old"
91
+ " path where tokens were stored. The new location is"
92
+ f" `{cls.path_token}` which is configurable using `HF_HOME` environment"
93
+ " variable. Your token has been copied to this new location. You can"
94
+ " now safely delete the old token file manually or use"
95
+ " `huggingface-cli logout`."
96
+ )
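A minimal usage sketch of `HfFolder`; the token is a placeholder, and running this really writes and deletes the token file under `HF_HOME`, so it is illustrative only.

```py
from huggingface_hub.utils._hf_folder import HfFolder

HfFolder.save_token("hf_xxx")   # placeholder token
print(HfFolder.get_token())     # reads it back (also migrates from the old path if needed)
HfFolder.delete_token()         # no error if the file is already gone
```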
venv/lib/python3.10/site-packages/huggingface_hub/utils/_http.py ADDED
@@ -0,0 +1,321 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to handle HTTP requests in Huggingface Hub."""
16
+
17
+ import io
18
+ import os
19
+ import threading
20
+ import time
21
+ import uuid
22
+ from functools import lru_cache
23
+ from http import HTTPStatus
24
+ from typing import Callable, Optional, Tuple, Type, Union
25
+
26
+ import requests
27
+ from requests import Response
28
+ from requests.adapters import HTTPAdapter
29
+ from requests.models import PreparedRequest
30
+
31
+ from .. import constants
32
+ from . import logging
33
+ from ._typing import HTTP_METHOD_T
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ # Both headers are used by the Hub to debug failed requests.
39
+ # `X_AMZN_TRACE_ID` is better as it also works to debug on Cloudfront and ALB.
40
+ # If `X_AMZN_TRACE_ID` is set, the Hub will use it as well.
41
+ X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"
42
+ X_REQUEST_ID = "x-request-id"
43
+
44
+
45
+ class OfflineModeIsEnabled(ConnectionError):
46
+ """Raised when a request is made but `HF_HUB_OFFLINE=1` is set as environment variable."""
47
+
48
+
49
+ class UniqueRequestIdAdapter(HTTPAdapter):
50
+ X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"
51
+
52
+ def add_headers(self, request, **kwargs):
53
+ super().add_headers(request, **kwargs)
54
+
55
+ # Add random request ID => easier for server-side debug
56
+ if X_AMZN_TRACE_ID not in request.headers:
57
+ request.headers[X_AMZN_TRACE_ID] = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())
58
+
59
+ # Add debug log
60
+ has_token = str(request.headers.get("authorization", "")).startswith("Bearer hf_")
61
+ logger.debug(
62
+ f"Request {request.headers[X_AMZN_TRACE_ID]}: {request.method} {request.url} (authenticated: {has_token})"
63
+ )
64
+
65
+ def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
66
+ """Catch any RequestException to append request id to the error message for debugging."""
67
+ try:
68
+ return super().send(request, *args, **kwargs)
69
+ except requests.RequestException as e:
70
+ request_id = request.headers.get(X_AMZN_TRACE_ID)
71
+ if request_id is not None:
72
+ # Taken from https://stackoverflow.com/a/58270258
73
+ e.args = (*e.args, f"(Request ID: {request_id})")
74
+ raise
75
+
76
+
77
+ class OfflineAdapter(HTTPAdapter):
78
+ def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
79
+ raise OfflineModeIsEnabled(
80
+ f"Cannot reach {request.url}: offline mode is enabled. To disable it, please unset the `HF_HUB_OFFLINE` environment variable."
81
+ )
82
+
83
+
84
+ def _default_backend_factory() -> requests.Session:
85
+ session = requests.Session()
86
+ if constants.HF_HUB_OFFLINE:
87
+ session.mount("http://", OfflineAdapter())
88
+ session.mount("https://", OfflineAdapter())
89
+ else:
90
+ session.mount("http://", UniqueRequestIdAdapter())
91
+ session.mount("https://", UniqueRequestIdAdapter())
92
+ return session
93
+
94
+
95
+ BACKEND_FACTORY_T = Callable[[], requests.Session]
96
+ _GLOBAL_BACKEND_FACTORY: BACKEND_FACTORY_T = _default_backend_factory
97
+
98
+
99
+ def configure_http_backend(backend_factory: BACKEND_FACTORY_T = _default_backend_factory) -> None:
100
+ """
101
+ Configure the HTTP backend by providing a `backend_factory`. Any HTTP calls made by `huggingface_hub` will use a
102
+ Session object instantiated by this factory. This can be useful if you are running your scripts in a specific
103
+ environment requiring custom configuration (e.g. custom proxy or certifications).
104
+
105
+ Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
106
+ `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
107
+ set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
108
+ calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.
109
+
110
+ See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.
111
+
112
+ Example:
113
+ ```py
114
+ import requests
115
+ from huggingface_hub import configure_http_backend, get_session
116
+
117
+ # Create a factory function that returns a Session with configured proxies
118
+ def backend_factory() -> requests.Session:
119
+ session = requests.Session()
120
+ session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
121
+ return session
122
+
123
+ # Set it as the default session factory
124
+ configure_http_backend(backend_factory=backend_factory)
125
+
126
+ # In practice, this is mostly done internally in `huggingface_hub`
127
+ session = get_session()
128
+ ```
129
+ """
130
+ global _GLOBAL_BACKEND_FACTORY
131
+ _GLOBAL_BACKEND_FACTORY = backend_factory
132
+ reset_sessions()
133
+
134
+
135
+ def get_session() -> requests.Session:
136
+ """
137
+ Get a `requests.Session` object, using the session factory from the user.
138
+
139
+ Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
140
+ `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
141
+ set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
142
+ calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.
143
+
144
+ See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.
145
+
146
+ Example:
147
+ ```py
148
+ import requests
149
+ from huggingface_hub import configure_http_backend, get_session
150
+
151
+ # Create a factory function that returns a Session with configured proxies
152
+ def backend_factory() -> requests.Session:
153
+ session = requests.Session()
154
+ session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
155
+ return session
156
+
157
+ # Set it as the default session factory
158
+ configure_http_backend(backend_factory=backend_factory)
159
+
160
+ # In practice, this is mostly done internally in `huggingface_hub`
161
+ session = get_session()
162
+ ```
163
+ """
164
+ return _get_session_from_cache(process_id=os.getpid(), thread_id=threading.get_ident())
165
+
166
+
167
+ def reset_sessions() -> None:
168
+ """Reset the cache of sessions.
169
+
170
+ Mostly used internally when sessions are reconfigured or an SSLError is raised.
171
+ See [`configure_http_backend`] for more details.
172
+ """
173
+ _get_session_from_cache.cache_clear()
174
+
175
+
176
+ @lru_cache
177
+ def _get_session_from_cache(process_id: int, thread_id: int) -> requests.Session:
178
+ """
179
+ Create a new session per thread using global factory. Using LRU cache (maxsize 128) to avoid memory leaks when
180
+ using thousands of threads. Cache is cleared when `configure_http_backend` is called.
181
+ """
182
+ return _GLOBAL_BACKEND_FACTORY()
183
+
184
+
185
+ def http_backoff(
186
+ method: HTTP_METHOD_T,
187
+ url: str,
188
+ *,
189
+ max_retries: int = 5,
190
+ base_wait_time: float = 1,
191
+ max_wait_time: float = 8,
192
+ retry_on_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = (
193
+ requests.Timeout,
194
+ requests.ConnectionError,
195
+ ),
196
+ retry_on_status_codes: Union[int, Tuple[int, ...]] = HTTPStatus.SERVICE_UNAVAILABLE,
197
+ **kwargs,
198
+ ) -> Response:
199
+ """Wrapper around requests to retry calls on an endpoint, with exponential backoff.
200
+
201
+ Endpoint call is retried on exceptions (ex: connection timeout, proxy error,...)
202
+ and/or on specific status codes (ex: service unavailable). If the call failed more
203
+ than `max_retries`, the exception is thrown or `raise_for_status` is called on the
204
+ response object.
205
+
206
+ Re-implement mechanisms from the `backoff` library to avoid adding an external
207
+ dependency to `huggingface_hub`. See https://github.com/litl/backoff.
208
+
209
+ Args:
210
+ method (`Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]`):
211
+ HTTP method to perform.
212
+ url (`str`):
213
+ The URL of the resource to fetch.
214
+ max_retries (`int`, *optional*, defaults to `5`):
215
+ Maximum number of retries, defaults to 5.
216
+ base_wait_time (`float`, *optional*, defaults to `1`):
217
+ Duration (in seconds) to wait before retrying the first time.
218
+ Wait time between retries then grows exponentially, capped by
219
+ `max_wait_time`.
220
+ max_wait_time (`float`, *optional*, defaults to `8`):
221
+ Maximum duration (in seconds) to wait before retrying.
222
+ retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception]]`, *optional*):
223
+ Define which exceptions must be caught to retry the request. Can be a single type or a tuple of types.
224
+ By default, retry on `requests.Timeout` and `requests.ConnectionError`.
225
+ retry_on_status_codes (`int` or `Tuple[int]`, *optional*, defaults to `503`):
226
+ Define on which status codes the request must be retried. By default, only
227
+ HTTP 503 Service Unavailable is retried.
228
+ **kwargs (`dict`, *optional*):
229
+ kwargs to pass to `requests.request`.
230
+
231
+ Example:
232
+ ```
233
+ >>> from huggingface_hub.utils import http_backoff
234
+
235
+ # Same usage as "requests.request".
236
+ >>> response = http_backoff("GET", "https://www.google.com")
237
+ >>> response.raise_for_status()
238
+
239
+ # If you expect a Gateway Timeout from time to time
240
+ >>> http_backoff("PUT", upload_url, data=data, retry_on_status_codes=504)
241
+ >>> response.raise_for_status()
242
+ ```
243
+
244
+ <Tip warning={true}>
245
+
246
+ When using `requests` it is possible to stream data by passing an iterator to the
247
+ `data` argument. On http backoff this is a problem as the iterator is not reset
248
+ after a failed call. This issue is mitigated for file objects or any IO streams
249
+ by saving the initial position of the cursor (with `data.tell()`) and resetting the
250
+ cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff
251
+ will fail. If this is a hard constraint for you, please let us know by opening an
252
+ issue on [Github](https://github.com/huggingface/huggingface_hub).
253
+
254
+ </Tip>
255
+ """
256
+ if isinstance(retry_on_exceptions, type): # Tuple from single exception type
257
+ retry_on_exceptions = (retry_on_exceptions,)
258
+
259
+ if isinstance(retry_on_status_codes, int): # Tuple from single status code
260
+ retry_on_status_codes = (retry_on_status_codes,)
261
+
262
+ nb_tries = 0
263
+ sleep_time = base_wait_time
264
+
265
+ # If `data` is used and is a file object (or any IO), it will be consumed on the
266
+ # first HTTP request. We need to save the initial position so that the full content
267
+ # of the file is re-sent on http backoff. See warning tip in docstring.
268
+ io_obj_initial_pos = None
269
+ if "data" in kwargs and isinstance(kwargs["data"], io.IOBase):
270
+ io_obj_initial_pos = kwargs["data"].tell()
271
+
272
+ session = get_session()
273
+ while True:
274
+ nb_tries += 1
275
+ try:
276
+ # If `data` is used and is a file object (or any IO), set back cursor to
277
+ # initial position.
278
+ if io_obj_initial_pos is not None:
279
+ kwargs["data"].seek(io_obj_initial_pos)
280
+
281
+ # Perform request and return if status_code is not in the retry list.
282
+ response = session.request(method=method, url=url, **kwargs)
283
+ if response.status_code not in retry_on_status_codes:
284
+ return response
285
+
286
+ # Wrong status code returned (HTTP 503 for instance)
287
+ logger.warning(f"HTTP Error {response.status_code} thrown while requesting {method} {url}")
288
+ if nb_tries > max_retries:
289
+ response.raise_for_status() # Will raise uncaught exception
290
+ # We return the response to avoid an infinite loop in the corner case where the
291
+ # user asks to retry on a status code that doesn't trigger raise_for_status.
292
+ return response
293
+
294
+ except retry_on_exceptions as err:
295
+ logger.warning(f"'{err}' thrown while requesting {method} {url}")
296
+
297
+ if isinstance(err, requests.ConnectionError):
298
+ reset_sessions() # In case of SSLError it's best to reset the shared requests.Session objects
299
+
300
+ if nb_tries > max_retries:
301
+ raise err
302
+
303
+ # Sleep for X seconds
304
+ logger.warning(f"Retrying in {sleep_time}s [Retry {nb_tries}/{max_retries}].")
305
+ time.sleep(sleep_time)
306
+
307
+ # Update sleep time for next retry
308
+ sleep_time = min(max_wait_time, sleep_time * 2) # Exponential backoff
309
+
310
+
311
+ def fix_hf_endpoint_in_url(url: str, endpoint: Optional[str]) -> str:
312
+ """Replace the default endpoint in a URL by a custom one.
313
+
314
+ This is useful when using a proxy and the Hugging Face Hub returns a URL with the default endpoint.
315
+ """
316
+ endpoint = endpoint or constants.ENDPOINT
317
+ # check if a proxy has been set => if yes, update the returned URL to use the proxy
318
+ if endpoint not in (None, constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT):
319
+ url = url.replace(constants._HF_DEFAULT_ENDPOINT, endpoint)
320
+ url = url.replace(constants._HF_DEFAULT_STAGING_ENDPOINT, endpoint)
321
+ return url
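A small sketch of the endpoint rewriting; the custom endpoint is a made-up internal mirror.

```py
from huggingface_hub.utils._http import fix_hf_endpoint_in_url

url = "https://huggingface.co/gpt2/resolve/main/config.json"
print(fix_hf_endpoint_in_url(url, endpoint="https://hub.example.corp"))
# https://hub.example.corp/gpt2/resolve/main/config.json
```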
venv/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py ADDED
@@ -0,0 +1,52 @@
1
+ # coding=utf-8
2
+ # Copyright 2022-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to handle pagination on Huggingface Hub."""
16
+
17
+ from typing import Dict, Iterable, Optional
18
+
19
+ import requests
20
+
21
+ from . import get_session, hf_raise_for_status, logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def paginate(path: str, params: Dict, headers: Dict) -> Iterable:
28
+ """Fetch a list of models/datasets/spaces and paginate through results.
29
+
30
+ This is using the same "Link" header format as GitHub.
31
+ See:
32
+ - https://requests.readthedocs.io/en/latest/api/#requests.Response.links
33
+ - https://docs.github.com/en/rest/guides/traversing-with-pagination#link-header
34
+ """
35
+ session = get_session()
36
+ r = session.get(path, params=params, headers=headers)
37
+ hf_raise_for_status(r)
38
+ yield from r.json()
39
+
40
+ # Follow pages
41
+ # Next link already contains query params
42
+ next_page = _get_next_page(r)
43
+ while next_page is not None:
44
+ logger.debug(f"Pagination detected. Requesting next page: {next_page}")
45
+ r = session.get(next_page, headers=headers)
46
+ hf_raise_for_status(r)
47
+ yield from r.json()
48
+ next_page = _get_next_page(r)
49
+
50
+
51
+ def _get_next_page(response: requests.Response) -> Optional[str]:
52
+ return response.links.get("next", {}).get("url")
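To close, a hedged sketch of how `paginate` is typically driven; the query parameters are assumptions for the example, and the "Link" pagination headers are followed transparently.

```py
from huggingface_hub.utils._headers import build_hf_headers
from huggingface_hub.utils._pagination import paginate

# Hypothetical listing call against the public models API.
for model in paginate(
    "https://huggingface.co/api/models",
    params={"search": "bert", "limit": 50},
    headers=build_hf_headers(token=False),
):
    print(model.get("id"))
    break  # stop after the first result for the example
```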