Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_commit_api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_snapshot_download.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_tensorboard_logger.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/inference_api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py +27 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/_cli_utils.py +64 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/delete_cache.py +428 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/download.py +192 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/env.py +36 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py +55 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/lfs.py +199 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/scan_cache.py +139 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/tag.py +159 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/upload.py +298 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_client.py +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_common.py +472 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py +260 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py +55 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py +77 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
- llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py +43 -0
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_commit_api.cpython-310.pyc
ADDED
Binary file (22.7 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_snapshot_download.cpython-310.pyc
ADDED
Binary file (9.88 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_tensorboard_logger.cpython-310.pyc
ADDED
Binary file (6.2 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc
ADDED
Binary file (13 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/inference_api.cpython-310.pyc
ADDED
Binary file (7.58 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc
ADDED
Binary file (27.3 kB)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py
ADDED
@@ -0,0 +1,27 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from argparse import _SubParsersAction


class BaseHuggingfaceCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: _SubParsersAction):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/_cli_utils.py
ADDED
@@ -0,0 +1,64 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a utility for good-looking prints."""

import os
from typing import List, Union


class ANSI:
    """
    Helper for en.wikipedia.org/wiki/ANSI_escape_code
    """

    _bold = "\u001b[1m"
    _gray = "\u001b[90m"
    _red = "\u001b[31m"
    _reset = "\u001b[0m"

    @classmethod
    def bold(cls, s: str) -> str:
        return cls._format(s, cls._bold)

    @classmethod
    def gray(cls, s: str) -> str:
        return cls._format(s, cls._gray)

    @classmethod
    def red(cls, s: str) -> str:
        return cls._format(s, cls._bold + cls._red)

    @classmethod
    def _format(cls, s: str, code: str) -> str:
        if os.environ.get("NO_COLOR"):
            # See https://no-color.org/
            return s
        return f"{code}{s}{cls._reset}"


def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
    """
    Inspired by:

    - stackoverflow.com/a/8356620/593036
    - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
    """
    col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
    row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
    lines = []
    lines.append(row_format.format(*headers))
    lines.append(row_format.format(*["-" * w for w in col_widths]))
    for row in rows:
        lines.append(row_format.format(*row))
    return "\n".join(lines)
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/delete_cache.py
ADDED
@@ -0,0 +1,428 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains command to delete some revisions from the HF cache directory.

Usage:
    huggingface-cli delete-cache
    huggingface-cli delete-cache --disable-tui
    huggingface-cli delete-cache --dir ~/.cache/huggingface/hub

NOTE:
    This command is based on `InquirerPy` to build the multiselect menu in the terminal.
    This dependency has to be installed with `pip install huggingface_hub[cli]`. Since
    we want to avoid as much as possible cross-platform issues, I chose a library that
    is built on top of `python-prompt-toolkit` which seems to be a reference in terminal
    GUI (actively maintained on both Unix and Windows, 7.9k stars).

    For the moment, the TUI feature is in beta.

    See:
    - https://github.com/kazhala/InquirerPy
    - https://inquirerpy.readthedocs.io/en/latest/
    - https://github.com/prompt-toolkit/python-prompt-toolkit

    Other solutions could have been:
    - `simple_term_menu`: would be good as well for our use case but some issues suggest
      that Windows is less supported.
      See: https://github.com/IngoMeyer441/simple-term-menu
    - `PyInquirer`: very similar to `InquirerPy` but older and not maintained anymore.
      In particular, no support of Python3.10.
      See: https://github.com/CITGuru/PyInquirer
    - `pick` (or `pickpack`): easy to use and flexible but built on top of Python's
      standard library `curses` that is specific to Unix (not implemented on Windows).
      See https://github.com/wong2/pick and https://github.com/anafvana/pickpack.
    - `inquirer`: lot of traction (700 stars) but explicitly states "experimental
      support of Windows". Not built on top of `python-prompt-toolkit`.
      See https://github.com/magmax/python-inquirer

TODO: add support for `huggingface-cli delete-cache aaaaaa bbbbbb cccccc (...)` ?
TODO: add "--keep-last" arg to delete revisions that are not on `main` ref
TODO: add "--filter" arg to filter repositories by name ?
TODO: add "--sort" arg to sort by size ?
TODO: add "--limit" arg to limit to X repos ?
TODO: add "-y" arg for immediate deletion ?
See discussions in https://github.com/huggingface/huggingface_hub/issues/1025.
"""

import os
from argparse import Namespace, _SubParsersAction
from functools import wraps
from tempfile import mkstemp
from typing import Any, Callable, Iterable, List, Optional, Union

from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir
from . import BaseHuggingfaceCLICommand
from ._cli_utils import ANSI


try:
    from InquirerPy import inquirer
    from InquirerPy.base.control import Choice
    from InquirerPy.separator import Separator

    _inquirer_py_available = True
except ImportError:
    _inquirer_py_available = False


def require_inquirer_py(fn: Callable) -> Callable:
    """Decorator to flag methods that require `InquirerPy`."""

    # TODO: refactor this + imports in a unified pattern across codebase
    @wraps(fn)
    def _inner(*args, **kwargs):
        if not _inquirer_py_available:
            raise ImportError(
                "The `delete-cache` command requires extra dependencies to work with"
                " the TUI.\nPlease run `pip install huggingface_hub[cli]` to install"
                " them.\nOtherwise, disable TUI using the `--disable-tui` flag."
            )

        return fn(*args, **kwargs)

    return _inner


# Possibility for the user to cancel deletion
_CANCEL_DELETION_STR = "CANCEL_DELETION"


class DeleteCacheCommand(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        delete_cache_parser = parser.add_parser("delete-cache", help="Delete revisions from the cache directory.")

        delete_cache_parser.add_argument(
            "--dir",
            type=str,
            default=None,
            help="cache directory (optional). Default to the default HuggingFace cache.",
        )

        delete_cache_parser.add_argument(
            "--disable-tui",
            action="store_true",
            help=(
                "Disable Terminal User Interface (TUI) mode. Useful if your"
                " platform/terminal doesn't support the multiselect menu."
            ),
        )

        delete_cache_parser.set_defaults(func=DeleteCacheCommand)

    def __init__(self, args: Namespace) -> None:
        self.cache_dir: Optional[str] = args.dir
        self.disable_tui: bool = args.disable_tui

    def run(self):
        """Run `delete-cache` command with or without TUI."""
        # Scan cache directory
        hf_cache_info = scan_cache_dir(self.cache_dir)

        # Manual review from the user
        if self.disable_tui:
            selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[])
        else:
            selected_hashes = _manual_review_tui(hf_cache_info, preselected=[])

        # If deletion is not cancelled
        if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes:
            confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + " Confirm deletion ?"

            # Confirm deletion
            if self.disable_tui:
                confirmed = _ask_for_confirmation_no_tui(confirm_message)
            else:
                confirmed = _ask_for_confirmation_tui(confirm_message)

            # Deletion is confirmed
            if confirmed:
                strategy = hf_cache_info.delete_revisions(*selected_hashes)
                print("Start deletion.")
                strategy.execute()
                print(
                    f"Done. Deleted {len(strategy.repos)} repo(s) and"
                    f" {len(strategy.snapshots)} revision(s) for a total of"
                    f" {strategy.expected_freed_size_str}."
                )
                return

        # Deletion is cancelled
        print("Deletion is cancelled. Do nothing.")


@require_inquirer_py
def _manual_review_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]:
    """Ask the user for a manual review of the revisions to delete.

    Displays a multi-select menu in the terminal (TUI).
    """
    # Define multiselect list
    choices = _get_tui_choices_from_scan(repos=hf_cache_info.repos, preselected=preselected)
    checkbox = inquirer.checkbox(
        message="Select revisions to delete:",
        choices=choices,  # List of revisions with some pre-selection
        cycle=False,  # No loop between top and bottom
        height=100,  # Large list if possible
        # We use the instruction to display to the user the expected effect of the
        # deletion.
        instruction=_get_expectations_str(
            hf_cache_info,
            selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled],
        ),
        # We use the long instruction to should keybindings instructions to the user
        long_instruction="Press <space> to select, <enter> to validate and <ctrl+c> to quit without modification.",
        # Message that is displayed once the user validates its selection.
        transformer=lambda result: f"{len(result)} revision(s) selected.",
    )

    # Add a callback to update the information line when a revision is
    # selected/unselected
    def _update_expectations(_) -> None:
        # Hacky way to dynamically set an instruction message to the checkbox when
        # a revision hash is selected/unselected.
        checkbox._instruction = _get_expectations_str(
            hf_cache_info,
            selected_hashes=[choice["value"] for choice in checkbox.content_control.choices if choice["enabled"]],
        )

    checkbox.kb_func_lookup["toggle"].append({"func": _update_expectations})

    # Finally display the form to the user.
    try:
        return checkbox.execute()
    except KeyboardInterrupt:
        return []  # Quit without deletion


@require_inquirer_py
def _ask_for_confirmation_tui(message: str, default: bool = True) -> bool:
    """Ask for confirmation using Inquirer."""
    return inquirer.confirm(message, default=default).execute()


def _get_tui_choices_from_scan(repos: Iterable[CachedRepoInfo], preselected: List[str]) -> List:
    """Build a list of choices from the scanned repos.

    Args:
        repos (*Iterable[`CachedRepoInfo`]*):
            List of scanned repos on which we want to delete revisions.
        preselected (*List[`str`]*):
            List of revision hashes that will be preselected.

    Return:
        The list of choices to pass to `inquirer.checkbox`.
    """
    choices: List[Union[Choice, Separator]] = []

    # First choice is to cancel the deletion. If selected, nothing will be deleted,
    # no matter the other selected items.
    choices.append(
        Choice(
            _CANCEL_DELETION_STR,
            name="None of the following (if selected, nothing will be deleted).",
            enabled=False,
        )
    )

    # Display a separator per repo and a Choice for each revisions of the repo
    for repo in sorted(repos, key=_repo_sorting_order):
        # Repo as separator
        choices.append(
            Separator(
                f"\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
                f" used {repo.last_accessed_str})"
            )
        )
        for revision in sorted(repo.revisions, key=_revision_sorting_order):
            # Revision as choice
            choices.append(
                Choice(
                    revision.commit_hash,
                    name=(
                        f"{revision.commit_hash[:8]}:"
                        f" {', '.join(sorted(revision.refs)) or '(detached)'} #"
                        f" modified {revision.last_modified_str}"
                    ),
                    enabled=revision.commit_hash in preselected,
                )
            )

    # Return choices
    return choices


def _manual_review_no_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]:
    """Ask the user for a manual review of the revisions to delete.

    Used when TUI is disabled. Manual review happens in a separate tmp file that the
    user can manually edit.
    """
    # 1. Generate temporary file with delete commands.
    fd, tmp_path = mkstemp(suffix=".txt")  # suffix to make it easier to find by editors
    os.close(fd)

    lines = []
    for repo in sorted(hf_cache_info.repos, key=_repo_sorting_order):
        lines.append(
            f"\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
            f" used {repo.last_accessed_str})"
        )
        for revision in sorted(repo.revisions, key=_revision_sorting_order):
            lines.append(
                # Deselect by prepending a '#'
                f"{'' if revision.commit_hash in preselected else '#'} "
                f" {revision.commit_hash} # Refs:"
                # Print `refs` as comment on same line
                f" {', '.join(sorted(revision.refs)) or '(detached)'} # modified"
                # Print `last_modified` as comment on same line
                f" {revision.last_modified_str}"
            )

    with open(tmp_path, "w") as f:
        f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS)
        f.write("\n".join(lines))

    # 2. Prompt instructions to user.
    instructions = f"""
    TUI is disabled. In order to select which revisions you want to delete, please edit
    the following file using the text editor of your choice. Instructions for manual
    editing are located at the beginning of the file. Edit the file, save it and confirm
    to continue.
    File to edit: {ANSI.bold(tmp_path)}
    """
    print("\n".join(line.strip() for line in instructions.strip().split("\n")))

    # 3. Wait for user confirmation.
    while True:
        selected_hashes = _read_manual_review_tmp_file(tmp_path)
        if _ask_for_confirmation_no_tui(
            _get_expectations_str(hf_cache_info, selected_hashes) + " Continue ?",
            default=False,
        ):
            break

    # 4. Return selected_hashes
    os.remove(tmp_path)
    return selected_hashes


def _ask_for_confirmation_no_tui(message: str, default: bool = True) -> bool:
    """Ask for confirmation using pure-python."""
    YES = ("y", "yes", "1")
    NO = ("n", "no", "0")
    DEFAULT = ""
    ALL = YES + NO + (DEFAULT,)
    full_message = message + (" (Y/n) " if default else " (y/N) ")
    while True:
        answer = input(full_message).lower()
        if answer == DEFAULT:
            return default
        if answer in YES:
            return True
        if answer in NO:
            return False
        print(f"Invalid input. Must be one of {ALL}")


def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str:
    """Format a string to display to the user how much space would be saved.

    Example:
    ```
    >>> _get_expectations_str(hf_cache_info, selected_hashes)
    '7 revisions selected counting for 4.3G.'
    ```
    """
    if _CANCEL_DELETION_STR in selected_hashes:
        return "Nothing will be deleted."
    strategy = hf_cache_info.delete_revisions(*selected_hashes)
    return f"{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}."


def _read_manual_review_tmp_file(tmp_path: str) -> List[str]:
    """Read the manually reviewed instruction file and return a list of revision hash.

    Example:
    ```txt
    # This is the tmp file content
    ###

    # Commented out line
    123456789 # revision hash

    # Something else
    # a_newer_hash # 2 days ago
    an_older_hash # 3 days ago
    ```

    ```py
    >>> _read_manual_review_tmp_file(tmp_path)
    ['123456789', 'an_older_hash']
    ```
    """
    with open(tmp_path) as f:
        content = f.read()

    # Split lines
    lines = [line.strip() for line in content.split("\n")]

    # Filter commented lines
    selected_lines = [line for line in lines if not line.startswith("#")]

    # Select only before comment
    selected_hashes = [line.split("#")[0].strip() for line in selected_lines]

    # Return revision hashes
    return [hash for hash in selected_hashes if len(hash) > 0]


_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f"""
# INSTRUCTIONS
# ------------
# This is a temporary file created by running `huggingface-cli delete-cache` with the
# `--disable-tui` option. It contains a set of revisions that can be deleted from your
# local cache directory.
#
# Please manually review the revisions you want to delete:
#   - Revision hashes can be commented out with '#'.
#   - Only non-commented revisions in this file will be deleted.
#   - Revision hashes that are removed from this file are ignored as well.
#   - If `{_CANCEL_DELETION_STR}` line is uncommented, the all cache deletion is cancelled and
#     no changes will be applied.
#
# Once you've manually reviewed this file, please confirm deletion in the terminal. This
# file will be automatically removed once done.
# ------------

# KILL SWITCH
# ------------
# Un-comment following line to completely cancel the deletion process
# {_CANCEL_DELETION_STR}
# ------------

# REVISIONS
# ------------
""".strip()


def _repo_sorting_order(repo: CachedRepoInfo) -> Any:
    # First split by Dataset/Model, then sort by last accessed (oldest first)
    return (repo.repo_type, repo.last_accessed)


def _revision_sorting_order(revision: CachedRevisionInfo) -> Any:
    # Sort by last modified (oldest first)
    return revision.last_modified
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/download.py
ADDED
@@ -0,0 +1,192 @@
# coding=utf-8
# Copyright 2023-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains command to download files from the Hub with the CLI.

Usage:
    huggingface-cli download --help

    # Download file
    huggingface-cli download gpt2 config.json

    # Download entire repo
    huggingface-cli download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78

    # Download repo with filters
    huggingface-cli download gpt2 --include="*.safetensors"

    # Download with token
    huggingface-cli download Wauplin/private-model --token=hf_***

    # Download quietly (no progress bar, no warnings, only the returned path)
    huggingface-cli download gpt2 config.json --quiet

    # Download to local dir
    huggingface-cli download gpt2 --local-dir=./models/gpt2
"""

import warnings
from argparse import Namespace, _SubParsersAction
from typing import List, Optional

from huggingface_hub import logging
from huggingface_hub._snapshot_download import snapshot_download
from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.file_download import hf_hub_download
from huggingface_hub.utils import disable_progress_bars, enable_progress_bars


logger = logging.get_logger(__name__)


class DownloadCommand(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        download_parser = parser.add_parser("download", help="Download files from the Hub")
        download_parser.add_argument(
            "repo_id", type=str, help="ID of the repo to download from (e.g. `username/repo-name`)."
        )
        download_parser.add_argument(
            "filenames", type=str, nargs="*", help="Files to download (e.g. `config.json`, `data/metadata.jsonl`)."
        )
        download_parser.add_argument(
            "--repo-type",
            choices=["model", "dataset", "space"],
            default="model",
            help="Type of repo to download from (defaults to 'model').",
        )
        download_parser.add_argument(
            "--revision",
            type=str,
            help="An optional Git revision id which can be a branch name, a tag, or a commit hash.",
        )
        download_parser.add_argument(
            "--include", nargs="*", type=str, help="Glob patterns to match files to download."
        )
        download_parser.add_argument(
            "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to download."
        )
        download_parser.add_argument(
            "--cache-dir", type=str, help="Path to the directory where to save the downloaded files."
        )
        download_parser.add_argument(
            "--local-dir",
            type=str,
            help=(
                "If set, the downloaded file will be placed under this directory. Check out"
                " https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more"
                " details."
            ),
        )
        download_parser.add_argument(
            "--local-dir-use-symlinks",
            choices=["auto", "True", "False"],
            help=("Deprecated and ignored. Downloading to a local directory does not use symlinks anymore."),
        )
        download_parser.add_argument(
            "--force-download",
            action="store_true",
            help="If True, the files will be downloaded even if they are already cached.",
        )
        download_parser.add_argument(
            "--resume-download",
            action="store_true",
            help="Deprecated and ignored. Downloading a file to local dir always attempts to resume previously interrupted downloads (unless hf-transfer is enabled).",
        )
        download_parser.add_argument(
            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
        )
        download_parser.add_argument(
            "--quiet",
            action="store_true",
            help="If True, progress bars are disabled and only the path to the download files is printed.",
        )
        download_parser.set_defaults(func=DownloadCommand)

    def __init__(self, args: Namespace) -> None:
        self.token = args.token
        self.repo_id: str = args.repo_id
        self.filenames: List[str] = args.filenames
        self.repo_type: str = args.repo_type
        self.revision: Optional[str] = args.revision
        self.include: Optional[List[str]] = args.include
        self.exclude: Optional[List[str]] = args.exclude
        self.cache_dir: Optional[str] = args.cache_dir
        self.local_dir: Optional[str] = args.local_dir
        self.force_download: bool = args.force_download
        self.resume_download: Optional[bool] = args.resume_download or None
        self.quiet: bool = args.quiet

        if args.local_dir_use_symlinks is not None:
            warnings.warn(
                "Ignoring --local-dir-use-symlinks. Downloading to a local directory does not use symlinks anymore.",
                FutureWarning,
            )

    def run(self) -> None:
        if self.quiet:
            disable_progress_bars()
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                print(self._download())  # Print path to downloaded files
            enable_progress_bars()
        else:
            logging.set_verbosity_info()
            print(self._download())  # Print path to downloaded files
            logging.set_verbosity_warning()

    def _download(self) -> str:
        # Warn user if patterns are ignored
        if len(self.filenames) > 0:
            if self.include is not None and len(self.include) > 0:
                warnings.warn("Ignoring `--include` since filenames have being explicitly set.")
            if self.exclude is not None and len(self.exclude) > 0:
                warnings.warn("Ignoring `--exclude` since filenames have being explicitly set.")

        # Single file to download: use `hf_hub_download`
        if len(self.filenames) == 1:
            return hf_hub_download(
                repo_id=self.repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                filename=self.filenames[0],
                cache_dir=self.cache_dir,
                resume_download=self.resume_download,
                force_download=self.force_download,
                token=self.token,
                local_dir=self.local_dir,
                library_name="huggingface-cli",
            )

        # Otherwise: use `snapshot_download` to ensure all files comes from same revision
        elif len(self.filenames) == 0:
            allow_patterns = self.include
            ignore_patterns = self.exclude
        else:
            allow_patterns = self.filenames
            ignore_patterns = None

        return snapshot_download(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            revision=self.revision,
            allow_patterns=allow_patterns,
            ignore_patterns=ignore_patterns,
            resume_download=self.resume_download,
            force_download=self.force_download,
            cache_dir=self.cache_dir,
            token=self.token,
            local_dir=self.local_dir,
            library_name="huggingface-cli",
        )
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/env.py
ADDED
@@ -0,0 +1,36 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains command to print information about the environment.

Usage:
    huggingface-cli env
"""

from argparse import _SubParsersAction

from ..utils import dump_environment_info
from . import BaseHuggingfaceCLICommand


class EnvironmentCommand(BaseHuggingfaceCLICommand):
    def __init__(self, args):
        self.args = args

    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        env_parser = parser.add_parser("env", help="Print information about the environment.")
        env_parser.set_defaults(func=EnvironmentCommand)

    def run(self) -> None:
        dump_environment_info()
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py
ADDED
@@ -0,0 +1,55 @@
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from huggingface_hub.commands.delete_cache import DeleteCacheCommand
from huggingface_hub.commands.download import DownloadCommand
from huggingface_hub.commands.env import EnvironmentCommand
from huggingface_hub.commands.lfs import LfsCommands
from huggingface_hub.commands.scan_cache import ScanCacheCommand
from huggingface_hub.commands.tag import TagCommands
from huggingface_hub.commands.upload import UploadCommand
from huggingface_hub.commands.user import UserCommands


def main():
    parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="huggingface-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    UploadCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    ScanCacheCommand.register_subcommand(commands_parser)
    DeleteCacheCommand.register_subcommand(commands_parser)
    TagCommands.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/lfs.py
ADDED
@@ -0,0 +1,199 @@
"""
Implementation of a custom transfer agent for the transfer type "multipart" for
git-lfs.

Inspired by:
github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py

Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md


To launch debugger while developing:

``` [lfs "customtransfer.multipart"]
path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678
--wait-for-client
/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py
lfs-multipart-upload ```"""

import json
import os
import subprocess
import sys
from argparse import _SubParsersAction
from typing import Dict, List, Optional

from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj

from ..utils import get_session, hf_raise_for_status, logging


logger = logging.get_logger(__name__)


class LfsCommands(BaseHuggingfaceCLICommand):
    """
    Implementation of a custom transfer agent for the transfer type "multipart"
    for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom
    transfer agent is:
    https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md

    This introduces two commands to the CLI:

    1. $ huggingface-cli lfs-enable-largefiles

    This should be executed once for each model repo that contains a model file
    >5GB. It's documented in the error message you get if you just try to git
    push a 5GB file without having enabled it before.

    2. $ huggingface-cli lfs-multipart-upload

    This command is called by lfs directly and is not meant to be called by the
    user.
    """

    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        enable_parser = parser.add_parser(
            "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."
        )
        enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
        enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))

        # Command will get called by git-lfs, do not call it directly.
        upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False)
        upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))


class LfsEnableCommand:
    def __init__(self, args):
        self.args = args

    def run(self):
        local_path = os.path.abspath(self.args.path)
        if not os.path.isdir(local_path):
            print("This does not look like a valid git repo.")
            exit(1)
        subprocess.run(
            "git config lfs.customtransfer.multipart.path huggingface-cli".split(),
            check=True,
            cwd=local_path,
        )
        subprocess.run(
            f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
            check=True,
            cwd=local_path,
        )
        print("Local repo set up for largefiles")


def write_msg(msg: Dict):
    """Write out the message in Line delimited JSON."""
    msg_str = json.dumps(msg) + "\n"
    sys.stdout.write(msg_str)
    sys.stdout.flush()


def read_msg() -> Optional[Dict]:
    """Read Line delimited JSON from stdin."""
    msg = json.loads(sys.stdin.readline().strip())

    if "terminate" in (msg.get("type"), msg.get("event")):
        # terminate message received
        return None

    if msg.get("event") not in ("download", "upload"):
        logger.critical("Received unexpected message")
        sys.exit(1)

    return msg


class LfsUploadCommand:
    def __init__(self, args) -> None:
        self.args = args

    def run(self) -> None:
        # Immediately after invoking a custom transfer process, git-lfs
        # sends initiation data to the process over stdin.
        # This tells the process useful information about the configuration.
        init_msg = json.loads(sys.stdin.readline().strip())
        if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
            write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
            sys.exit(1)

        # The transfer process should use the information it needs from the
        # initiation structure, and also perform any one-off setup tasks it
        # needs to do. It should then respond on stdout with a simple empty
        # confirmation structure, as follows:
        write_msg({})

        # After the initiation exchange, git-lfs will send any number of
        # transfer requests to the stdin of the transfer process, in a serial sequence.
        while True:
            msg = read_msg()
            if msg is None:
                # When all transfers have been processed, git-lfs will send
                # a terminate event to the stdin of the transfer process.
                # On receiving this message the transfer process should
                # clean up and terminate. No response is expected.
                sys.exit(0)

            oid = msg["oid"]
            filepath = msg["path"]
            completion_url = msg["action"]["href"]
            header = msg["action"]["header"]
            chunk_size = int(header.pop("chunk_size"))
            presigned_urls: List[str] = list(header.values())

            # Send a "started" progress event to allow other workers to start.
            # Otherwise they're delayed until first "progress" event is reported,
            # i.e. after the first 5GB by default (!)
            write_msg(
                {
                    "event": "progress",
                    "oid": oid,
                    "bytesSoFar": 1,
                    "bytesSinceLast": 0,
                }
            )

            parts = []
            with open(filepath, "rb") as file:
                for i, presigned_url in enumerate(presigned_urls):
                    with SliceFileObj(
                        file,
                        seek_from=i * chunk_size,
                        read_limit=chunk_size,
                    ) as data:
                        r = get_session().put(presigned_url, data=data)
                        hf_raise_for_status(r)
                        parts.append(
                            {
                                "etag": r.headers.get("etag"),
                                "partNumber": i + 1,
                            }
                        )
                        # In order to support progress reporting while data is uploading / downloading,
                        # the transfer process should post messages to stdout
                        write_msg(
                            {
                                "event": "progress",
                                "oid": oid,
                                "bytesSoFar": (i + 1) * chunk_size,
                                "bytesSinceLast": chunk_size,
                            }
                        )
                        # Not precise but that's ok.

            r = get_session().post(
                completion_url,
                json={
                    "oid": oid,
                    "parts": parts,
                },
            )
            hf_raise_for_status(r)

            write_msg({"event": "complete", "oid": oid})
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/scan_cache.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022-present, the HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains command to scan the HF cache directory.

Usage:
    huggingface-cli scan-cache
    huggingface-cli scan-cache -v
    huggingface-cli scan-cache -vvv
    huggingface-cli scan-cache --dir ~/.cache/huggingface/hub
"""

import time
from argparse import Namespace, _SubParsersAction
from typing import Optional

from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir
from . import BaseHuggingfaceCLICommand
from ._cli_utils import ANSI, tabulate


class ScanCacheCommand(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.")

        scan_cache_parser.add_argument(
            "--dir",
            type=str,
            default=None,
            help="cache directory to scan (optional). Default to the default HuggingFace cache.",
        )
        scan_cache_parser.add_argument(
            "-v",
            "--verbose",
            action="count",
            default=0,
            help="show a more verbose output",
        )
        scan_cache_parser.set_defaults(func=ScanCacheCommand)

    def __init__(self, args: Namespace) -> None:
        self.verbosity: int = args.verbose
        self.cache_dir: Optional[str] = args.dir

    def run(self):
        try:
            t0 = time.time()
            hf_cache_info = scan_cache_dir(self.cache_dir)
            t1 = time.time()
        except CacheNotFound as exc:
            cache_dir = exc.cache_dir
            print(f"Cache directory not found: {cache_dir}")
            return

        self._print_hf_cache_info_as_table(hf_cache_info)

        print(
            f"\nDone in {round(t1-t0,1)}s. Scanned {len(hf_cache_info.repos)} repo(s)"
            f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}."
        )
        if len(hf_cache_info.warnings) > 0:
            message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning."
            if self.verbosity >= 3:
                print(ANSI.gray(message))
                for warning in hf_cache_info.warnings:
                    print(ANSI.gray(warning))
            else:
                print(ANSI.gray(message + " Use -vvv to print details."))

    def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None:
        if self.verbosity == 0:
            print(
                tabulate(
                    rows=[
                        [
                            repo.repo_id,
                            repo.repo_type,
                            "{:>12}".format(repo.size_on_disk_str),
                            repo.nb_files,
                            repo.last_accessed_str,
                            repo.last_modified_str,
                            ", ".join(sorted(repo.refs)),
                            str(repo.repo_path),
                        ]
                        for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)
                    ],
                    headers=[
                        "REPO ID",
                        "REPO TYPE",
                        "SIZE ON DISK",
                        "NB FILES",
                        "LAST_ACCESSED",
                        "LAST_MODIFIED",
                        "REFS",
                        "LOCAL PATH",
                    ],
                )
            )
        else:
            print(
                tabulate(
                    rows=[
                        [
                            repo.repo_id,
                            repo.repo_type,
                            revision.commit_hash,
                            "{:>12}".format(revision.size_on_disk_str),
                            revision.nb_files,
                            revision.last_modified_str,
                            ", ".join(sorted(revision.refs)),
                            str(revision.snapshot_path),
                        ]
                        for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)
                        for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash)
                    ],
                    headers=[
                        "REPO ID",
                        "REPO TYPE",
                        "REVISION",
                        "SIZE ON DISK",
                        "NB FILES",
                        "LAST_MODIFIED",
                        "REFS",
                        "LOCAL PATH",
                    ],
                )
            )
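For readers who want the same information programmatically rather than through the CLI, here is a minimal sketch using the public scan_cache_dir() helper imported above; the printed fields mirror the non-verbose table and the snippet assumes nothing beyond a standard huggingface_hub install.

# Minimal sketch: calling scan_cache_dir() directly instead of going through the CLI.
# Pass an explicit cache directory if you would use `--dir` on the command line.
from huggingface_hub import scan_cache_dir

cache_info = scan_cache_dir()
for repo in sorted(cache_info.repos, key=lambda r: r.repo_path):
    print(f"{repo.repo_id} ({repo.repo_type}): {repo.size_on_disk_str}, {repo.nb_files} file(s)")
print(f"Total size on disk: {cache_info.size_on_disk_str}")
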
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/tag.py
ADDED
@@ -0,0 +1,159 @@
# coding=utf-8
# Copyright 2024-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains commands to perform tag management with the CLI.

Usage Examples:
    - Create a tag:
        $ huggingface-cli tag user/my-model 1.0 --message "First release"
        $ huggingface-cli tag user/my-model 1.0 -m "First release" --revision develop
        $ huggingface-cli tag user/my-dataset 1.0 -m "First release" --repo-type dataset
        $ huggingface-cli tag user/my-space 1.0
    - List all tags:
        $ huggingface-cli tag -l user/my-model
        $ huggingface-cli tag --list user/my-dataset --repo-type dataset
    - Delete a tag:
        $ huggingface-cli tag -d user/my-model 1.0
        $ huggingface-cli tag --delete user/my-dataset 1.0 --repo-type dataset
        $ huggingface-cli tag -d user/my-space 1.0 -y
"""

from argparse import Namespace, _SubParsersAction

from requests.exceptions import HTTPError

from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.constants import (
    REPO_TYPES,
)
from huggingface_hub.hf_api import HfApi

from ..utils import HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError
from ._cli_utils import ANSI


class TagCommands(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        tag_parser = parser.add_parser("tag", help="(create, list, delete) tags for a repo in the hub")

        tag_parser.add_argument("repo_id", type=str, help="The ID of the repo to tag (e.g. `username/repo-name`).")
        tag_parser.add_argument("tag", nargs="?", type=str, help="The name of the tag for creation or deletion.")
        tag_parser.add_argument("-m", "--message", type=str, help="The description of the tag to create.")
        tag_parser.add_argument("--revision", type=str, help="The git revision to tag.")
        tag_parser.add_argument(
            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens."
        )
        tag_parser.add_argument(
            "--repo-type",
            choices=["model", "dataset", "space"],
            default="model",
            help="Set the type of repository (model, dataset, or space).",
        )
        tag_parser.add_argument("-y", "--yes", action="store_true", help="Answer Yes to prompts automatically.")

        tag_parser.add_argument("-l", "--list", action="store_true", help="List tags for a repository.")
        tag_parser.add_argument("-d", "--delete", action="store_true", help="Delete a tag for a repository.")

        tag_parser.set_defaults(func=lambda args: handle_commands(args))


def handle_commands(args: Namespace):
    if args.list:
        return TagListCommand(args)
    elif args.delete:
        return TagDeleteCommand(args)
    else:
        return TagCreateCommand(args)


class TagCommand:
    def __init__(self, args: Namespace):
        self.args = args
        self.api = HfApi(token=self.args.token)
        self.repo_id = self.args.repo_id
        self.repo_type = self.args.repo_type
        if self.repo_type not in REPO_TYPES:
            print("Invalid repo --repo-type")
            exit(1)


class TagCreateCommand(TagCommand):
    def run(self):
        print(f"You are about to create tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")

        try:
            self.api.create_tag(
                repo_id=self.repo_id,
                tag=self.args.tag,
                tag_message=self.args.message,
                revision=self.args.revision,
                repo_type=self.repo_type,
            )
        except RepositoryNotFoundError:
            print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
            exit(1)
        except RevisionNotFoundError:
            print(f"Revision {ANSI.bold(self.args.revision)} not found.")
            exit(1)
        except HfHubHTTPError as e:
            if e.response.status_code == 409:
                print(f"Tag {ANSI.bold(self.args.tag)} already exists on {ANSI.bold(self.repo_id)}")
                exit(1)
            raise e

        print(f"Tag {ANSI.bold(self.args.tag)} created on {ANSI.bold(self.repo_id)}")


class TagListCommand(TagCommand):
    def run(self):
        try:
            refs = self.api.list_repo_refs(
                repo_id=self.repo_id,
                repo_type=self.repo_type,
            )
        except RepositoryNotFoundError:
            print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
            exit(1)
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        if len(refs.tags) == 0:
            print("No tags found")
            exit(0)
        print(f"Tags for {self.repo_type} {ANSI.bold(self.repo_id)}:")
        for tag in refs.tags:
            print(tag.name)


class TagDeleteCommand(TagCommand):
    def run(self):
        print(f"You are about to delete tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")

        if not self.args.yes:
            choice = input("Proceed? [Y/n] ").lower()
            if choice not in ("", "y", "yes"):
                print("Abort")
                exit()
        try:
            self.api.delete_tag(repo_id=self.repo_id, tag=self.args.tag, repo_type=self.repo_type)
        except RepositoryNotFoundError:
            print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")
            exit(1)
        except RevisionNotFoundError:
            print(f"Tag {ANSI.bold(self.args.tag)} not found on {ANSI.bold(self.repo_id)}")
            exit(1)
        print(f"Tag {ANSI.bold(self.args.tag)} deleted on {ANSI.bold(self.repo_id)}")
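The command above is a thin wrapper around HfApi. A minimal sketch of the equivalent Python calls follows; the repo id, tag name and message are placeholder values, and the token is picked up from the local login or the HF_TOKEN environment variable.

# Minimal sketch: the same create / list / delete operations done directly with HfApi.
from huggingface_hub import HfApi

api = HfApi()
api.create_tag(repo_id="user/my-model", tag="1.0", tag_message="First release")
print([t.name for t in api.list_repo_refs("user/my-model").tags])
api.delete_tag(repo_id="user/my-model", tag="1.0")
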
llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/upload.py
ADDED
@@ -0,0 +1,298 @@
# coding=utf-8
# Copyright 2023-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains command to upload a repo or file with the CLI.

Usage:
    # Upload file (implicit)
    huggingface-cli upload my-cool-model ./my-cool-model.safetensors

    # Upload file (explicit)
    huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors

    # Upload directory (implicit). If `my-cool-model/` is a directory it will be uploaded, otherwise an exception is raised.
    huggingface-cli upload my-cool-model

    # Upload directory (explicit)
    huggingface-cli upload my-cool-model ./models/my-cool-model .

    # Upload filtered directory (example: tensorboard logs except for the last run)
    huggingface-cli upload my-cool-model ./model/training/logs --include "*.tfevents.*" --exclude "*20230905*"

    # Upload private dataset
    huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type=dataset --private

    # Upload with token
    huggingface-cli upload Wauplin/my-cool-model --token=hf_****

    # Sync local Space with Hub (upload new files, delete removed files)
    huggingface-cli upload Wauplin/space-example --repo-type=space --exclude="/logs/*" --delete="*" --commit-message="Sync local Space with Hub"

    # Schedule commits every 30 minutes
    huggingface-cli upload Wauplin/my-cool-model --every=30
"""

import os
import time
import warnings
from argparse import Namespace, _SubParsersAction
from typing import List, Optional

from huggingface_hub import logging
from huggingface_hub._commit_scheduler import CommitScheduler
from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils import RevisionNotFoundError, disable_progress_bars, enable_progress_bars


logger = logging.get_logger(__name__)


class UploadCommand(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        upload_parser = parser.add_parser("upload", help="Upload a file or a folder to a repo on the Hub")
        upload_parser.add_argument(
            "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)."
        )
        upload_parser.add_argument(
            "local_path", nargs="?", help="Local path to the file or folder to upload. Defaults to current directory."
        )
        upload_parser.add_argument(
            "path_in_repo",
            nargs="?",
            help="Path of the file or folder in the repo. Defaults to the relative path of the file or folder.",
        )
        upload_parser.add_argument(
            "--repo-type",
            choices=["model", "dataset", "space"],
            default="model",
            help="Type of the repo to upload to (e.g. `dataset`).",
        )
        upload_parser.add_argument(
            "--revision",
            type=str,
            help=(
                "An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not"
                " exist and `--create-pr` is not set, a branch will be automatically created."
            ),
        )
        upload_parser.add_argument(
            "--private",
            action="store_true",
            help=(
                "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already"
                " exists."
            ),
        )
        upload_parser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.")
        upload_parser.add_argument(
            "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload."
        )
        upload_parser.add_argument(
            "--delete",
            nargs="*",
            type=str,
            help="Glob patterns for file to be deleted from the repo while committing.",
        )
        upload_parser.add_argument(
            "--commit-message", type=str, help="The summary / title / first line of the generated commit."
        )
        upload_parser.add_argument("--commit-description", type=str, help="The description of the generated commit.")
        upload_parser.add_argument(
            "--create-pr", action="store_true", help="Whether to upload content as a new Pull Request."
        )
        upload_parser.add_argument(
            "--every",
            type=float,
            help="If set, a background job is scheduled to create commits every `every` minutes.",
        )
        upload_parser.add_argument(
            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
        )
        upload_parser.add_argument(
            "--quiet",
            action="store_true",
            help="If True, progress bars are disabled and only the path to the uploaded files is printed.",
        )
        upload_parser.set_defaults(func=UploadCommand)

    def __init__(self, args: Namespace) -> None:
        self.repo_id: str = args.repo_id
        self.repo_type: Optional[str] = args.repo_type
        self.revision: Optional[str] = args.revision
        self.private: bool = args.private

        self.include: Optional[List[str]] = args.include
        self.exclude: Optional[List[str]] = args.exclude
        self.delete: Optional[List[str]] = args.delete

        self.commit_message: Optional[str] = args.commit_message
        self.commit_description: Optional[str] = args.commit_description
        self.create_pr: bool = args.create_pr
        self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")
        self.quiet: bool = args.quiet  # disable warnings and progress bars

        # Check `--every` is valid
        if args.every is not None and args.every <= 0:
            raise ValueError(f"`every` must be a positive value (got '{args.every}')")
        self.every: Optional[float] = args.every

        # Resolve `local_path` and `path_in_repo`
        repo_name: str = args.repo_id.split("/")[-1]  # e.g. "Wauplin/my-cool-model" => "my-cool-model"
        self.local_path: str
        self.path_in_repo: str
        if args.local_path is None and os.path.isfile(repo_name):
            # Implicit case 1: user provided only a repo_id which happen to be a local file as well => upload it with same name
            self.local_path = repo_name
            self.path_in_repo = repo_name
        elif args.local_path is None and os.path.isdir(repo_name):
            # Implicit case 2: user provided only a repo_id which happen to be a local folder as well => upload it at root
            self.local_path = repo_name
            self.path_in_repo = "."
        elif args.local_path is None:
            # Implicit case 3: user provided only a repo_id that does not match a local file or folder
            # => the user must explicitly provide a local_path => raise exception
            raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.")
        elif args.path_in_repo is None and os.path.isfile(args.local_path):
            # Explicit local path to file, no path in repo => upload it at root with same name
            self.local_path = args.local_path
            self.path_in_repo = os.path.basename(args.local_path)
        elif args.path_in_repo is None:
            # Explicit local path to folder, no path in repo => upload at root
            self.local_path = args.local_path
            self.path_in_repo = "."
        else:
            # Finally, if both paths are explicit
            self.local_path = args.local_path
            self.path_in_repo = args.path_in_repo

    def run(self) -> None:
        if self.quiet:
            disable_progress_bars()
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                print(self._upload())
            enable_progress_bars()
        else:
            logging.set_verbosity_info()
            print(self._upload())
            logging.set_verbosity_warning()

    def _upload(self) -> str:
        if os.path.isfile(self.local_path):
            if self.include is not None and len(self.include) > 0:
                warnings.warn("Ignoring `--include` since a single file is uploaded.")
            if self.exclude is not None and len(self.exclude) > 0:
                warnings.warn("Ignoring `--exclude` since a single file is uploaded.")
            if self.delete is not None and len(self.delete) > 0:
                warnings.warn("Ignoring `--delete` since a single file is uploaded.")

        if not HF_HUB_ENABLE_HF_TRANSFER:
            logger.info(
                "Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See"
                " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details."
            )

        # Schedule commits if `every` is set
        if self.every is not None:
            if os.path.isfile(self.local_path):
                # If file => watch entire folder + use allow_patterns
                folder_path = os.path.dirname(self.local_path)
                path_in_repo = (
                    self.path_in_repo[: -len(self.local_path)]  # remove filename from path_in_repo
                    if self.path_in_repo.endswith(self.local_path)
                    else self.path_in_repo
                )
                allow_patterns = [self.local_path]
                ignore_patterns = []
            else:
                folder_path = self.local_path
                path_in_repo = self.path_in_repo
                allow_patterns = self.include or []
                ignore_patterns = self.exclude or []
                if self.delete is not None and len(self.delete) > 0:
                    warnings.warn("Ignoring `--delete` when uploading with scheduled commits.")

            scheduler = CommitScheduler(
                folder_path=folder_path,
                repo_id=self.repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                allow_patterns=allow_patterns,
                ignore_patterns=ignore_patterns,
                path_in_repo=path_in_repo,
                private=self.private,
                every=self.every,
                hf_api=self.api,
            )
            print(f"Scheduling commits every {self.every} minutes to {scheduler.repo_id}.")
            try:  # Block main thread until KeyboardInterrupt
                while True:
                    time.sleep(100)
            except KeyboardInterrupt:
                scheduler.stop()
                return "Stopped scheduled commits."

        # Otherwise, create repo and proceed with the upload
        if not os.path.isfile(self.local_path) and not os.path.isdir(self.local_path):
            raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.")
        repo_id = self.api.create_repo(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            exist_ok=True,
            private=self.private,
            space_sdk="gradio" if self.repo_type == "space" else None,
            # ^ We don't want it to fail when uploading to a Space => let's set Gradio by default.
            # ^ I'd rather not add CLI args to set it explicitly as we already have `huggingface-cli repo create` for that.
        ).repo_id

        # Check if branch already exists and if not, create it
        if self.revision is not None and not self.create_pr:
            try:
                self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision)
            except RevisionNotFoundError:
                logger.info(f"Branch '{self.revision}' not found. Creating it...")
                self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True)
                # ^ `exist_ok=True` to avoid race concurrency issues

        # File-based upload
        if os.path.isfile(self.local_path):
            return self.api.upload_file(
                path_or_fileobj=self.local_path,
                path_in_repo=self.path_in_repo,
                repo_id=repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                commit_message=self.commit_message,
                commit_description=self.commit_description,
                create_pr=self.create_pr,
            )

        # Folder-based upload
        else:
            return self.api.upload_folder(
                folder_path=self.local_path,
                path_in_repo=self.path_in_repo,
                repo_id=repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                commit_message=self.commit_message,
                commit_description=self.commit_description,
                create_pr=self.create_pr,
                allow_patterns=self.include,
                ignore_patterns=self.exclude,
                delete_patterns=self.delete,
            )
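A minimal sketch of the folder-upload path above when driven directly from Python; the repo id and local folder are placeholders, and create_repo / upload_folder are called with the same arguments the command resolves to for an explicit directory upload.

# Minimal sketch of what `huggingface-cli upload username/my-cool-model ./models/my-cool-model .`
# boils down to (placeholder repo id and paths).
from huggingface_hub import HfApi

api = HfApi()
repo_id = api.create_repo("username/my-cool-model", repo_type="model", exist_ok=True).repo_id
api.upload_folder(
    folder_path="./models/my-cool-model",
    path_in_repo=".",
    repo_id=repo_id,
    repo_type="model",
    commit_message="Upload model",
)
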
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_client.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_common.py
ADDED
@@ -0,0 +1,472 @@
# coding=utf-8
# Copyright 2023-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities used by both the sync and async inference clients."""

import base64
import io
import json
import logging
from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncIterable,
    BinaryIO,
    ContextManager,
    Dict,
    Generator,
    Iterable,
    List,
    Literal,
    NoReturn,
    Optional,
    Set,
    Union,
    overload,
)

from requests import HTTPError

from huggingface_hub.errors import (
    GenerationError,
    IncompleteGenerationError,
    OverloadedError,
    TextGenerationError,
    UnknownError,
    ValidationError,
)

from ..constants import ENDPOINT
from ..utils import (
    build_hf_headers,
    get_session,
    hf_raise_for_status,
    is_aiohttp_available,
    is_numpy_available,
    is_pillow_available,
)
from ._generated.types import (
    ChatCompletionStreamOutput,
    ChatCompletionStreamOutputChoice,
    ChatCompletionStreamOutputDelta,
    TextGenerationStreamOutput,
)


if TYPE_CHECKING:
    from aiohttp import ClientResponse, ClientSession
    from PIL.Image import Image

# TYPES
UrlT = str
PathT = Union[str, Path]
BinaryT = Union[bytes, BinaryIO]
ContentT = Union[BinaryT, PathT, UrlT]

# Used to set an Accept: image/png header
TASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}

logger = logging.getLogger(__name__)


# Add dataclass for ModelStatus. We use this dataclass in get_model_status function.
@dataclass
class ModelStatus:
    """
    This Dataclass represents the model status in the Hugging Face Inference API.

    Args:
        loaded (`bool`):
            If the model is currently loaded into Hugging Face's InferenceAPI. Models
            are loaded on-demand, leading to the user's first request taking longer.
            If a model is loaded, you can be assured that it is in a healthy state.
        state (`str`):
            The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.
            If a model's state is 'Loadable', it's not too big and has a supported
            backend. Loadable models are automatically loaded when the user first
            requests inference on the endpoint. This means it is transparent for the
            user to load a model, except that the first call takes longer to complete.
        compute_type (`Dict`):
            Information about the compute resource the model is using or will use, such as 'gpu' type and number of
            replicas.
        framework (`str`):
            The name of the framework that the model was built with, such as 'transformers'
            or 'text-generation-inference'.
    """

    loaded: bool
    state: str
    compute_type: Dict
    framework: str


## IMPORT UTILS


def _import_aiohttp():
    # Make sure `aiohttp` is installed on the machine.
    if not is_aiohttp_available():
        raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")
    import aiohttp

    return aiohttp


def _import_numpy():
    """Make sure `numpy` is installed on the machine."""
    if not is_numpy_available():
        raise ImportError("Please install numpy to use deal with embeddings (`pip install numpy`).")
    import numpy

    return numpy


def _import_pil_image():
    """Make sure `PIL` is installed on the machine."""
    if not is_pillow_available():
        raise ImportError(
            "Please install Pillow to use deal with images (`pip install Pillow`). If you don't want the image to be"
            " post-processed, use `client.post(...)` and get the raw response from the server."
        )
    from PIL import Image

    return Image


## RECOMMENDED MODELS

# Will be globally fetched only once (see '_fetch_recommended_models')
_RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None


def _fetch_recommended_models() -> Dict[str, Optional[str]]:
    global _RECOMMENDED_MODELS
    if _RECOMMENDED_MODELS is None:
        response = get_session().get(f"{ENDPOINT}/api/tasks", headers=build_hf_headers())
        hf_raise_for_status(response)
        _RECOMMENDED_MODELS = {
            task: _first_or_none(details["widgetModels"]) for task, details in response.json().items()
        }
    return _RECOMMENDED_MODELS


def _first_or_none(items: List[Any]) -> Optional[Any]:
    try:
        return items[0] or None
    except IndexError:
        return None


## ENCODING / DECODING UTILS


@overload
def _open_as_binary(
    content: ContentT,
) -> ContextManager[BinaryT]: ...  # means "if input is not None, output is not None"


@overload
def _open_as_binary(
    content: Literal[None],
) -> ContextManager[Literal[None]]: ...  # means "if input is None, output is None"


@contextmanager  # type: ignore
def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]:
    """Open `content` as a binary file, either from a URL, a local path, or raw bytes.

    Do nothing if `content` is None,

    TODO: handle a PIL.Image as input
    TODO: handle base64 as input
    """
    # If content is a string => must be either a URL or a path
    if isinstance(content, str):
        if content.startswith("https://") or content.startswith("http://"):
            logger.debug(f"Downloading content from {content}")
            yield get_session().get(content).content  # TODO: retrieve as stream and pipe to post request ?
            return
        content = Path(content)
        if not content.exists():
            raise FileNotFoundError(
                f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local"
                " file. To pass raw content, please encode it as bytes first."
            )

    # If content is a Path => open it
    if isinstance(content, Path):
        logger.debug(f"Opening content from {content}")
        with content.open("rb") as f:
            yield f
    else:
        # Otherwise: already a file-like object or None
        yield content


def _b64_encode(content: ContentT) -> str:
    """Encode a raw file (image, audio) into base64. Can be bytes, an opened file, a path or a URL."""
    with _open_as_binary(content) as data:
        data_as_bytes = data if isinstance(data, bytes) else data.read()
        return base64.b64encode(data_as_bytes).decode()


def _b64_to_image(encoded_image: str) -> "Image":
    """Parse a base64-encoded string into a PIL Image."""
    Image = _import_pil_image()
    return Image.open(io.BytesIO(base64.b64decode(encoded_image)))


def _bytes_to_list(content: bytes) -> List:
    """Parse bytes from a Response object into a Python list.

    Expects the response body to be JSON-encoded data.

    NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a
    dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
    """
    return json.loads(content.decode())


def _bytes_to_dict(content: bytes) -> Dict:
    """Parse bytes from a Response object into a Python dictionary.

    Expects the response body to be JSON-encoded data.

    NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a
    list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
    """
    return json.loads(content.decode())


def _bytes_to_image(content: bytes) -> "Image":
    """Parse bytes from a Response object into a PIL Image.

    Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead.
    """
    Image = _import_pil_image()
    return Image.open(io.BytesIO(content))


## STREAMING UTILS


def _stream_text_generation_response(
    bytes_output_as_lines: Iterable[bytes], details: bool
) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
    """Used in `InferenceClient.text_generation`."""
    # Parse ServerSentEvents
    for byte_payload in bytes_output_as_lines:
        output = _format_text_generation_stream_output(byte_payload, details)
        if output is not None:
            yield output


async def _async_stream_text_generation_response(
    bytes_output_as_lines: AsyncIterable[bytes], details: bool
) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
    """Used in `AsyncInferenceClient.text_generation`."""
    # Parse ServerSentEvents
    async for byte_payload in bytes_output_as_lines:
        output = _format_text_generation_stream_output(byte_payload, details)
        if output is not None:
            yield output


def _format_text_generation_stream_output(
    byte_payload: bytes, details: bool
) -> Optional[Union[str, TextGenerationStreamOutput]]:
    if not byte_payload.startswith(b"data:"):
        return None  # empty line

    # Decode payload
    payload = byte_payload.decode("utf-8")
    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))

    # Either an error is being returned
    if json_payload.get("error") is not None:
        raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))

    # Or parse token payload
    output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)
    return output.token.text if not details else output


def _format_chat_completion_stream_output_from_text_generation(
    item: TextGenerationStreamOutput, created: int
) -> ChatCompletionStreamOutput:
    if item.details is None:
        # new token generated => return delta
        return ChatCompletionStreamOutput(
            # explicitly set 'dummy' values to reduce expectations from users
            id="dummy",
            model="dummy",
            object="dummy",
            system_fingerprint="dummy",
            choices=[
                ChatCompletionStreamOutputChoice(
                    delta=ChatCompletionStreamOutputDelta(
                        role="assistant",
                        content=item.token.text,
                    ),
                    finish_reason=None,
                    index=0,
                )
            ],
            created=created,
        )
    else:
        # generation is completed => return finish reason
        return ChatCompletionStreamOutput(
            # explicitly set 'dummy' values to reduce expectations from users
            id="dummy",
            model="dummy",
            object="dummy",
            system_fingerprint="dummy",
            choices=[
                ChatCompletionStreamOutputChoice(
                    delta=ChatCompletionStreamOutputDelta(role="assistant"),
                    finish_reason=item.details.finish_reason,
                    index=0,
                )
            ],
            created=created,
        )


def _stream_chat_completion_response_from_bytes(
    bytes_lines: Iterable[bytes],
) -> Iterable[ChatCompletionStreamOutput]:
    """Used in `InferenceClient.chat_completion` if model is served with TGI."""
    for item in bytes_lines:
        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
        if output is not None:
            yield output


async def _async_stream_chat_completion_response_from_bytes(
    bytes_lines: AsyncIterable[bytes],
) -> AsyncIterable[ChatCompletionStreamOutput]:
    """Used in `AsyncInferenceClient.chat_completion`."""
    async for item in bytes_lines:
        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
        if output is not None:
            yield output


def _format_chat_completion_stream_output_from_text_generation_from_bytes(
    byte_payload: bytes,
) -> Optional[ChatCompletionStreamOutput]:
    if not byte_payload.startswith(b"data:"):
        return None  # empty line

    # Decode payload
    payload = byte_payload.decode("utf-8")
    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
    return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)


async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
    async for byte_payload in response.content:
        yield byte_payload
    await client.close()


# "TGI servers" are servers running with the `text-generation-inference` backend.
# This backend is the go-to solution to run large language models at scale. However,
# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`
# solution is still in use.
#
# Both approaches have very similar APIs, but not exactly the same. What we do first in
# the `text_generation` method is to assume the model is served via TGI. If we realize
# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fallback to the
# default API with a warning message. When that's the case, we remember the unsupported
# attributes for this model in the `_UNSUPPORTED_TEXT_GENERATION_KWARGS` global variable.
#
# In addition, TGI servers have a built-in API route for chat-completion, which is not
# available on the default API. We use this route to provide a more consistent behavior
# when available.
#
# For more details, see https://github.com/huggingface/text-generation-inference and
# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.

_UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {}


def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None:
    _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs)


def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:
    return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, [])


_NON_CHAT_COMPLETION_SERVER: Set[str] = set()


def _set_as_non_chat_completion_server(model: str) -> None:
    _NON_CHAT_COMPLETION_SERVER.add(model)


def _is_chat_completion_server(model: str) -> bool:
    return model not in _NON_CHAT_COMPLETION_SERVER


# TEXT GENERATION ERRORS
# ----------------------
# Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation
# inference project (https://github.com/huggingface/text-generation-inference).
# ----------------------


def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
    """
    Try to parse text-generation-inference error message and raise HTTPError in any case.

    Args:
        error (`HTTPError`):
            The HTTPError that have been raised.
    """
    # Try to parse a Text Generation Inference error

    try:
        # Hacky way to retrieve payload in case of aiohttp error
        payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()
        error = payload.get("error")
        error_type = payload.get("error_type")
    except Exception:  # no payload
        raise http_error

    # If error_type => more information than `hf_raise_for_status`
    if error_type is not None:
        exception = _parse_text_generation_error(error, error_type)
        raise exception from http_error

    # Otherwise, fallback to default error
    raise http_error


def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:
    if error_type == "generation":
        return GenerationError(error)  # type: ignore
    if error_type == "incomplete_generation":
        return IncompleteGenerationError(error)  # type: ignore
    if error_type == "overloaded":
        return OverloadedError(error)  # type: ignore
    if error_type == "validation":
        return ValidationError(error)  # type: ignore
    return UnknownError(error)  # type: ignore
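To make the stream parsing above concrete, here is a hand-written example of the `data:`-prefixed server-sent-event line that _format_text_generation_stream_output expects; the JSON payload is illustrative only, not an actual server response, and only the standard library is used.

# Minimal sketch: the SSE line format parsed by the streaming helpers above.
import json

byte_payload = b'data: {"token": {"id": 42, "text": " world", "logprob": -0.1, "special": false}}'
assert byte_payload.startswith(b"data:")  # non-"data:" lines are skipped by the helper
json_payload = json.loads(byte_payload.decode("utf-8").lstrip("data:").rstrip("/n"))
print(json_payload["token"]["text"])  # " world"
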
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc
ADDED
Binary file (971 Bytes).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc
ADDED
Binary file (1.45 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc
ADDED
Binary file (2.39 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc
ADDED
Binary file (1.67 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc
ADDED
Binary file (1.88 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc
ADDED
Binary file (1.6 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc
ADDED
Binary file (1.44 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc
ADDED
Binary file (1.65 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc
ADDED
Binary file (2.37 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc
ADDED
Binary file (1.71 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc
ADDED
Binary file (1.59 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc
ADDED
Binary file (1.54 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc
ADDED
Binary file (1.7 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc
ADDED
Binary file (1.72 kB).

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py
ADDED
@@ -0,0 +1,43 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Literal, Optional

from .base import BaseInferenceType


ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass
class AudioClassificationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Audio Classification
    """

    function_to_apply: Optional["ClassificationOutputTransform"] = None
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""


@dataclass
class AudioClassificationInput(BaseInferenceType):
    """Inputs for Audio Classification inference"""

    inputs: Any
    """The input audio data"""
    parameters: Optional[AudioClassificationParameters] = None
    """Additional inference parameters"""


@dataclass
class AudioClassificationOutputElement(BaseInferenceType):
    """Outputs for Audio Classification inference"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
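A minimal sketch of how these generated dataclasses are typically filled from a raw JSON prediction, using the parse_obj_as_instance helper inherited from BaseInferenceType (the same helper _common.py uses for stream outputs); the payload is hand-written and the import path is an assumption based on the file layout shown in this diff.

# Minimal sketch: mapping a raw API prediction onto the generated dataclass.
from huggingface_hub.inference._generated.types import AudioClassificationOutputElement

raw = {"label": "dog", "score": 0.98}  # illustrative payload only
element = AudioClassificationOutputElement.parse_obj_as_instance(raw)
print(element.label, element.score)
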
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py
ADDED
@@ -0,0 +1,31 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any

from .base import BaseInferenceType


@dataclass
class AudioToAudioInput(BaseInferenceType):
    """Inputs for Audio to Audio inference"""

    inputs: Any
    """The input audio data"""


@dataclass
class AudioToAudioOutputElement(BaseInferenceType):
    """Outputs of inference for the Audio To Audio task
    A generated audio file with its label.
    """

    blob: Any
    """The generated audio file."""
    content_type: str
    """The content type of audio file."""
    label: str
    """The label of the audio file."""
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
ADDED
@@ -0,0 +1,116 @@
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, List, Literal, Optional, Union
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
EarlyStoppingEnum = Literal["never"]
|
13 |
+
|
14 |
+
|
15 |
+
@dataclass
|
16 |
+
class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):
|
17 |
+
"""Parametrization of the text generation process
|
18 |
+
Ad-hoc parametrization of the text generation process
|
19 |
+
"""
|
20 |
+
|
21 |
+
do_sample: Optional[bool] = None
|
22 |
+
"""Whether to use sampling instead of greedy decoding when generating new tokens."""
|
23 |
+
early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
|
24 |
+
"""Controls the stopping condition for beam-based methods."""
|
25 |
+
epsilon_cutoff: Optional[float] = None
|
26 |
+
"""If set to float strictly between 0 and 1, only tokens with a conditional probability
|
27 |
+
greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
|
28 |
+
3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
|
29 |
+
Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
|
30 |
+
"""
|
31 |
+
eta_cutoff: Optional[float] = None
|
32 |
+
"""Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
|
33 |
+
float strictly between 0 and 1, a token is only considered if it is greater than either
|
34 |
+
eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
|
35 |
+
term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
|
36 |
+
the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
|
37 |
+
See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
|
38 |
+
for more details.
|
39 |
+
"""
|
40 |
+
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over maxLength."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over maxLength."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""


@dataclass
class AutomaticSpeechRecognitionParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Automatic Speech Recognition
    """

    generate: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
    """Parametrization of the text generation process"""
    return_timestamps: Optional[bool] = None
    """Whether to output corresponding timestamps with the generated text"""


@dataclass
class AutomaticSpeechRecognitionInput(BaseInferenceType):
    """Inputs for Automatic Speech Recognition inference"""

    inputs: Any
    """The input audio data"""
    parameters: Optional[AutomaticSpeechRecognitionParameters] = None
    """Additional inference parameters"""


@dataclass
class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
    text: str
    """A chunk of text identified by the model"""
    timestamps: List[float]
    """The start and end timestamps corresponding with the text"""


@dataclass
class AutomaticSpeechRecognitionOutput(BaseInferenceType):
    """Outputs of inference for the Automatic Speech Recognition task"""

    text: str
    """The recognized text."""
    chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None
    """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
    the model.
    """

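Below is a minimal usage sketch (not part of the diff) showing how the request dataclasses added above can be assembled; the import path is taken from the file location in this diff, and the audio payload is purely illustrative.

from huggingface_hub.inference._generated.types.automatic_speech_recognition import (
    AutomaticSpeechRecognitionInput,
    AutomaticSpeechRecognitionParameters,
)

# Illustrative audio payload; in practice this is the raw content of an audio file.
audio_bytes = b"RIFF....WAVEfmt "

params = AutomaticSpeechRecognitionParameters(return_timestamps=True)
request = AutomaticSpeechRecognitionInput(inputs=audio_bytes, parameters=params)
# When return_timestamps is enabled, the response's `chunks` field carries
# AutomaticSpeechRecognitionOutputChunk entries with per-segment timestamps.
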
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py
ADDED
@@ -0,0 +1,260 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, List, Literal, Optional, Union

from .base import BaseInferenceType


@dataclass
class ChatCompletionInputFunctionDefinition(BaseInferenceType):
    arguments: Any
    name: str
    description: Optional[str] = None


@dataclass
class ChatCompletionInputToolCall(BaseInferenceType):
    function: ChatCompletionInputFunctionDefinition
    id: int
    type: str


@dataclass
class ChatCompletionInputMessage(BaseInferenceType):
    role: str
    content: Optional[str] = None
    name: Optional[str] = None
    tool_calls: Optional[List[ChatCompletionInputToolCall]] = None


@dataclass
class ChatCompletionInputToolTypeClass(BaseInferenceType):
    function_name: str


ChatCompletionInputToolTypeEnum = Literal["OneOf"]


@dataclass
class ChatCompletionInputTool(BaseInferenceType):
    function: ChatCompletionInputFunctionDefinition
    type: str


@dataclass
class ChatCompletionInput(BaseInferenceType):
    """Chat Completion Input.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    messages: List[ChatCompletionInputMessage]
    """A list of messages comprising the conversation so far."""
    model: str
    """[UNUSED] ID of the model to use. See the model endpoint compatibility table for details
    on which models work with the Chat API.
    """
    frequency_penalty: Optional[float] = None
    """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
    frequency in the text so far,
    decreasing the model's likelihood to repeat the same line verbatim.
    """
    logit_bias: Optional[List[float]] = None
    """UNUSED
    Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
    object that maps tokens
    (specified by their token ID in the tokenizer) to an associated bias value from -100 to
    100. Mathematically,
    the bias is added to the logits generated by the model prior to sampling. The exact
    effect will vary per model,
    but values between -1 and 1 should decrease or increase likelihood of selection; values
    like -100 or 100 should
    result in a ban or exclusive selection of the relevant token.
    """
    logprobs: Optional[bool] = None
    """Whether to return log probabilities of the output tokens or not. If true, returns the log
    probabilities of each
    output token returned in the content of message.
    """
    max_tokens: Optional[int] = None
    """The maximum number of tokens that can be generated in the chat completion."""
    n: Optional[int] = None
    """UNUSED
    How many chat completion choices to generate for each input message. Note that you will
    be charged based on the
    number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    """
    presence_penalty: Optional[float] = None
    """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
    appear in the text so far,
    increasing the model's likelihood to talk about new topics
    """
    seed: Optional[int] = None
    stop: Optional[List[str]] = None
    """Up to 4 sequences where the API will stop generating further tokens."""
    stream: Optional[bool] = None
    temperature: Optional[float] = None
    """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the
    output more random, while
    lower values like 0.2 will make it more focused and deterministic.
    We generally recommend altering this or `top_p` but not both.
    """
    tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, "ChatCompletionInputToolTypeEnum"]] = None
    tool_prompt: Optional[str] = None
    """A prompt to be appended before the tools"""
    tools: Optional[List[ChatCompletionInputTool]] = None
    """A list of tools the model may call. Currently, only functions are supported as a tool.
    Use this to provide a list of
    functions the model may generate JSON inputs for.
    """
    top_logprobs: Optional[int] = None
    """An integer between 0 and 5 specifying the number of most likely tokens to return at each
    token position, each with
    an associated log probability. logprobs must be set to true if this parameter is used.
    """
    top_p: Optional[float] = None
    """An alternative to sampling with temperature, called nucleus sampling, where the model
    considers the results of the
    tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
    probability mass are considered.
    """


@dataclass
class ChatCompletionOutputTopLogprob(BaseInferenceType):
    logprob: float
    token: str


@dataclass
class ChatCompletionOutputLogprob(BaseInferenceType):
    logprob: float
    token: str
    top_logprobs: List[ChatCompletionOutputTopLogprob]


@dataclass
class ChatCompletionOutputLogprobs(BaseInferenceType):
    content: List[ChatCompletionOutputLogprob]


@dataclass
class ChatCompletionOutputFunctionDefinition(BaseInferenceType):
    arguments: Any
    name: str
    description: Optional[str] = None


@dataclass
class ChatCompletionOutputToolCall(BaseInferenceType):
    function: ChatCompletionOutputFunctionDefinition
    id: int
    type: str


@dataclass
class ChatCompletionOutputMessage(BaseInferenceType):
    role: str
    content: Optional[str] = None
    name: Optional[str] = None
    tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None


@dataclass
class ChatCompletionOutputComplete(BaseInferenceType):
    finish_reason: str
    index: int
    message: ChatCompletionOutputMessage
    logprobs: Optional[ChatCompletionOutputLogprobs] = None


@dataclass
class ChatCompletionOutputUsage(BaseInferenceType):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int


@dataclass
class ChatCompletionOutput(BaseInferenceType):
    """Chat Completion Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    choices: List[ChatCompletionOutputComplete]
    created: int
    id: str
    model: str
    object: str
    system_fingerprint: str
    usage: ChatCompletionOutputUsage


@dataclass
class ChatCompletionStreamOutputFunction(BaseInferenceType):
    arguments: str
    name: Optional[str] = None


@dataclass
class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
    function: ChatCompletionStreamOutputFunction
    id: str
    index: int
    type: str


@dataclass
class ChatCompletionStreamOutputDelta(BaseInferenceType):
    role: str
    content: Optional[str] = None
    tool_calls: Optional[ChatCompletionStreamOutputDeltaToolCall] = None


@dataclass
class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
    logprob: float
    token: str


@dataclass
class ChatCompletionStreamOutputLogprob(BaseInferenceType):
    logprob: float
    token: str
    top_logprobs: List[ChatCompletionStreamOutputTopLogprob]


@dataclass
class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
    content: List[ChatCompletionStreamOutputLogprob]


@dataclass
class ChatCompletionStreamOutputChoice(BaseInferenceType):
    delta: ChatCompletionStreamOutputDelta
    index: int
    finish_reason: Optional[str] = None
    logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None


@dataclass
class ChatCompletionStreamOutput(BaseInferenceType):
    """Chat Completion Stream Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    choices: List[ChatCompletionStreamOutputChoice]
    created: int
    id: str
    model: str
    object: str
    system_fingerprint: str

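A minimal sketch (not part of the diff) of building a ChatCompletionInput from the dataclasses above; the import path mirrors the file location in this diff, and the model id and messages are illustrative.

from huggingface_hub.inference._generated.types.chat_completion import (
    ChatCompletionInput,
    ChatCompletionInputMessage,
)

request = ChatCompletionInput(
    messages=[
        ChatCompletionInputMessage(role="system", content="You are a concise assistant."),
        ChatCompletionInputMessage(role="user", content="Say hello in one sentence."),
    ],
    model="tgi",  # marked [UNUSED] in the docstring above, but the field is required
    max_tokens=128,
    temperature=0.2,
)
# A non-streaming response deserializes into ChatCompletionOutput, where the first
# generated message is available as output.choices[0].message.content.
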
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py
ADDED
@@ -0,0 +1,29 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, Optional

from .base import BaseInferenceType


@dataclass
class DepthEstimationInput(BaseInferenceType):
    """Inputs for Depth Estimation inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters"""


@dataclass
class DepthEstimationOutput(BaseInferenceType):
    """Outputs of inference for the Depth Estimation task"""

    depth: Any
    """The predicted depth as an image"""
    predicted_depth: Any
    """The predicted depth as a tensor"""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py
ADDED
@@ -0,0 +1,85 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, List, Optional, Union

from .base import BaseInferenceType


@dataclass
class DocumentQuestionAnsweringInputData(BaseInferenceType):
    """One (document, question) pair to answer"""

    image: Any
    """The image on which the question is asked"""
    question: str
    """A question to ask of the document"""


@dataclass
class DocumentQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Document Question Answering
    """

    doc_stride: Optional[int] = None
    """If the words in the document are too long to fit with the question for the model, it will
    be split in several chunks with some overlap. This argument controls the size of that
    overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer"""
    lang: Optional[str] = None
    """Language to use while running OCR. Defaults to english."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using doc_stride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Can return less
    than top_k answers if there are not enough options available within the context.
    """
    word_boxes: Optional[List[Union[List[float], str]]] = None
    """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
    skip the OCR step and use the provided bounding boxes instead.
    """


@dataclass
class DocumentQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Document Question Answering inference"""

    inputs: DocumentQuestionAnsweringInputData
    """One (document, question) pair to answer"""
    parameters: Optional[DocumentQuestionAnsweringParameters] = None
    """Additional inference parameters"""


@dataclass
class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Document Question Answering task"""

    answer: str
    """The answer to the question."""
    end: int
    """The end word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
    score: float
    """The probability associated to the answer."""
    start: int
    """The start word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
    words: List[int]
    """The index of each word/box pair that is in the answer"""

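A minimal sketch (not part of the diff) of one Document Question Answering request built from the dataclasses above; the import path follows this diff, and the image bytes and question are illustrative.

from huggingface_hub.inference._generated.types.document_question_answering import (
    DocumentQuestionAnsweringInput,
    DocumentQuestionAnsweringInputData,
    DocumentQuestionAnsweringParameters,
)

data = DocumentQuestionAnsweringInputData(
    image=b"<document image bytes>",  # illustrative placeholder for a scanned page
    question="What is the invoice total?",
)
params = DocumentQuestionAnsweringParameters(top_k=3)  # return the 3 most likely answers
request = DocumentQuestionAnsweringInput(inputs=data, parameters=params)
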
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py
ADDED
@@ -0,0 +1,19 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, Optional

from .base import BaseInferenceType


@dataclass
class FeatureExtractionInput(BaseInferenceType):
    """Inputs for Text Embedding inference"""

    inputs: str
    """The text to get the embeddings of"""
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters"""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py
ADDED
@@ -0,0 +1,50 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, List, Optional

from .base import BaseInferenceType


@dataclass
class FillMaskParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Fill Mask
    """

    targets: Optional[List[str]] = None
    """When passed, the model will limit the scores to the passed targets instead of looking up
    in the whole vocabulary. If the provided targets are not in the model vocab, they will be
    tokenized and the first resulting token will be used (with a warning, and that might be
    slower).
    """
    top_k: Optional[int] = None
    """When passed, overrides the number of predictions to return."""


@dataclass
class FillMaskInput(BaseInferenceType):
    """Inputs for Fill Mask inference"""

    inputs: str
    """The text with masked tokens"""
    parameters: Optional[FillMaskParameters] = None
    """Additional inference parameters"""


@dataclass
class FillMaskOutputElement(BaseInferenceType):
    """Outputs of inference for the Fill Mask task"""

    score: float
    """The corresponding probability"""
    sequence: str
    """The corresponding input with the mask token prediction."""
    token: int
    """The predicted token id (to replace the masked one)."""
    token_str: Any
    fill_mask_output_token_str: Optional[str] = None
    """The predicted token (to replace the masked one)."""

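A minimal sketch (not part of the diff) of a Fill Mask request and one deserialized prediction, using only the dataclasses above; the mask token and the output values are illustrative and depend on the model's tokenizer.

from huggingface_hub.inference._generated.types.fill_mask import (
    FillMaskInput,
    FillMaskOutputElement,
    FillMaskParameters,
)

request = FillMaskInput(
    inputs="Paris is the capital of [MASK].",
    parameters=FillMaskParameters(top_k=5),
)

# One prediction as it might look after deserializing a response element:
prediction = FillMaskOutputElement(
    score=0.97,
    sequence="Paris is the capital of France.",
    token=1822,  # illustrative token id
    token_str="France",
)
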
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py
ADDED
@@ -0,0 +1,43 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Literal, Optional

from .base import BaseInferenceType


ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass
class ImageClassificationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Image Classification
    """

    function_to_apply: Optional["ClassificationOutputTransform"] = None
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""


@dataclass
class ImageClassificationInput(BaseInferenceType):
    """Inputs for Image Classification inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ImageClassificationParameters] = None
    """Additional inference parameters"""


@dataclass
class ImageClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py
ADDED
@@ -0,0 +1,52 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Literal, Optional

from .base import BaseInferenceType


ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]


@dataclass
class ImageSegmentationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Image Segmentation
    """

    mask_threshold: Optional[float] = None
    """Threshold to use when turning the predicted masks into binary values."""
    overlap_mask_area_threshold: Optional[float] = None
    """Mask overlap threshold to eliminate small, disconnected segments."""
    subtask: Optional["ImageSegmentationSubtask"] = None
    """Segmentation task to be performed, depending on model capabilities."""
    threshold: Optional[float] = None
    """Probability threshold to filter out predicted masks."""


@dataclass
class ImageSegmentationInput(BaseInferenceType):
    """Inputs for Image Segmentation inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ImageSegmentationParameters] = None
    """Additional inference parameters"""


@dataclass
class ImageSegmentationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Segmentation task
    A predicted mask / segment
    """

    label: str
    """The label of the predicted segment"""
    mask: Any
    """The corresponding mask as a black-and-white image"""
    score: Optional[float] = None
    """The score or confidence degree the model has"""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py
ADDED
@@ -0,0 +1,55 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, List, Optional

from .base import BaseInferenceType


@dataclass
class ImageToImageTargetSize(BaseInferenceType):
    """The size in pixel of the output image"""

    height: int
    width: int


@dataclass
class ImageToImageParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Image To Image
    """

    guidance_scale: Optional[float] = None
    """For diffusion models. A higher guidance scale value encourages the model to generate
    images closely linked to the text prompt at the expense of lower image quality.
    """
    negative_prompt: Optional[List[str]] = None
    """One or several prompts to guide what NOT to include in image generation."""
    num_inference_steps: Optional[int] = None
    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
    a higher quality image at the expense of slower inference.
    """
    target_size: Optional[ImageToImageTargetSize] = None
    """The size in pixel of the output image"""


@dataclass
class ImageToImageInput(BaseInferenceType):
    """Inputs for Image To Image inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ImageToImageParameters] = None
    """Additional inference parameters"""


@dataclass
class ImageToImageOutput(BaseInferenceType):
    """Outputs of inference for the Image To Image task"""

    image: Any
    """The output image"""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py
ADDED
@@ -0,0 +1,55 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Optional

from .base import BaseInferenceType


@dataclass
class ObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Object Detection
    """

    threshold: Optional[float] = None
    """The probability necessary to make a prediction."""


@dataclass
class ObjectDetectionInput(BaseInferenceType):
    """Inputs for Object Detection inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ObjectDetectionParameters] = None
    """Additional inference parameters"""


@dataclass
class ObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """

    xmax: int
    xmin: int
    ymax: int
    ymin: int


@dataclass
class ObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Object Detection task"""

    box: ObjectDetectionBoundingBox
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """
    label: str
    """The predicted label for the bounding box"""
    score: float
    """The associated score / probability"""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py
ADDED
@@ -0,0 +1,77 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Optional

from .base import BaseInferenceType


@dataclass
class QuestionAnsweringInputData(BaseInferenceType):
    """One (context, question) pair to answer"""

    context: str
    """The context to be used for answering the question"""
    question: str
    """The question to be answered"""


@dataclass
class QuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Question Answering
    """

    align_to_words: Optional[bool] = None
    """Attempts to align the answer to real words. Improves quality on space separated
    languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
    """
    doc_stride: Optional[int] = None
    """If the context is too long to fit with the question for the model, it will be split in
    several chunks with some overlap. This argument controls the size of that overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using docStride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Note that we
    return less than topk answers if there are not enough options available within the
    context.
    """


@dataclass
class QuestionAnsweringInput(BaseInferenceType):
    """Inputs for Question Answering inference"""

    inputs: QuestionAnsweringInputData
    """One (context, question) pair to answer"""
    parameters: Optional[QuestionAnsweringParameters] = None
    """Additional inference parameters"""


@dataclass
class QuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Question Answering task"""

    answer: str
    """The answer to the question."""
    end: int
    """The character position in the input where the answer ends."""
    score: float
    """The probability associated to the answer."""
    start: int
    """The character position in the input where the answer begins."""

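A minimal sketch (not part of the diff) of an extractive Question Answering request built from the dataclasses above; context and question are illustrative.

from huggingface_hub.inference._generated.types.question_answering import (
    QuestionAnsweringInput,
    QuestionAnsweringInputData,
    QuestionAnsweringParameters,
)

request = QuestionAnsweringInput(
    inputs=QuestionAnsweringInputData(
        context="The Eiffel Tower was completed in 1889 and stands in Paris.",
        question="When was the Eiffel Tower completed?",
    ),
    parameters=QuestionAnsweringParameters(top_k=1, max_answer_len=15),
)
# Each returned answer maps to a QuestionAnsweringOutputElement carrying `answer`,
# `score`, and the character offsets `start`/`end` into the context string.
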
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/sentence_similarity.py
ADDED
@@ -0,0 +1,28 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

from .base import BaseInferenceType


@dataclass
class SentenceSimilarityInputData(BaseInferenceType):
    sentences: List[str]
    """A list of strings which will be compared against the source_sentence."""
    source_sentence: str
    """The string that you wish to compare the other strings with. This can be a phrase,
    sentence, or longer passage, depending on the model being used.
    """


@dataclass
class SentenceSimilarityInput(BaseInferenceType):
    """Inputs for Sentence similarity inference"""

    inputs: SentenceSimilarityInputData
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters"""

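A minimal sketch (not part of the diff) of a Sentence Similarity request built from the dataclasses above; note this generated file only defines input types, and the example sentences are illustrative.

from huggingface_hub.inference._generated.types.sentence_similarity import (
    SentenceSimilarityInput,
    SentenceSimilarityInputData,
)

request = SentenceSimilarityInput(
    inputs=SentenceSimilarityInputData(
        source_sentence="A cat is sleeping on the sofa.",
        sentences=[
            "A kitten naps on the couch.",
            "Stock markets closed higher today.",
        ],
    )
)
# The task typically returns one similarity score per entry in `sentences`.
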
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py
ADDED
@@ -0,0 +1,45 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

from .base import BaseInferenceType


@dataclass
class TableQuestionAnsweringInputData(BaseInferenceType):
    """One (table, question) pair to answer"""

    question: str
    """The question to be answered about the table"""
    table: Dict[str, List[str]]
    """The table to serve as context for the questions"""


@dataclass
class TableQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Table Question Answering inference"""

    inputs: TableQuestionAnsweringInputData
    """One (table, question) pair to answer"""
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters"""


@dataclass
class TableQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Table Question Answering task"""

    answer: str
    """The answer of the question given the table. If there is an aggregator, the answer will be
    preceded by `AGGREGATOR >`.
    """
    cells: List[str]
    """List of strings made up of the answer cell values."""
    coordinates: List[List[int]]
    """Coordinates of the cells of the answers."""
    aggregator: Optional[str] = None
    """If the model has an aggregator, this returns the aggregator."""

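A minimal sketch (not part of the diff) of a Table Question Answering request built from the dataclasses above; the table maps column names to equal-length lists of cell values, and its contents are illustrative.

from huggingface_hub.inference._generated.types.table_question_answering import (
    TableQuestionAnsweringInput,
    TableQuestionAnsweringInputData,
)

request = TableQuestionAnsweringInput(
    inputs=TableQuestionAnsweringInputData(
        table={
            "Repository": ["transformers", "datasets", "huggingface_hub"],
            "Stars": ["120000", "18000", "1600"],
        },
        question="Which repository has the most stars?",
    )
)
# The response's TableQuestionAnsweringOutputElement points back at the supporting
# table cells via its `cells` and `coordinates` fields.
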
llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py
ADDED
@@ -0,0 +1,45 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, Literal, Optional

from .base import BaseInferenceType


Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


@dataclass
class Text2TextGenerationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Text2text Generation
    """

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm"""
    truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
    """The truncation strategy to use"""


@dataclass
class Text2TextGenerationInput(BaseInferenceType):
    """Inputs for Text2text Generation inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[Text2TextGenerationParameters] = None
    """Additional inference parameters"""


@dataclass
class Text2TextGenerationOutput(BaseInferenceType):
    """Outputs of inference for the Text2text Generation task"""

    generated_text: Any
    text2_text_generation_output_generated_text: Optional[str] = None
    """The generated text."""

llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py
ADDED
@@ -0,0 +1,43 @@
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Literal, Optional

from .base import BaseInferenceType


ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass
class TextClassificationParameters(BaseInferenceType):
    """Additional inference parameters
    Additional inference parameters for Text Classification
    """

    function_to_apply: Optional["ClassificationOutputTransform"] = None
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""


@dataclass
class TextClassificationInput(BaseInferenceType):
    """Inputs for Text Classification inference"""

    inputs: str
    """The text to classify"""
    parameters: Optional[TextClassificationParameters] = None
    """Additional inference parameters"""


@dataclass
class TextClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Text Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
