diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_160_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_160_mp_rank_01_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0b1a353e6afe823761ceb06c2c68414bada7ab75 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_160_mp_rank_01_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:179d1ed1c7e1c869578a9c215acadb6b3bdc32b9ca0b36904058c42f24ca0442 +size 41830148 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_246_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_246_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d2fbda6e2e4a3072fcf1f6b558269e3fa92333b6 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_246_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:236817bcd78c5159bc5e6f66895defe217b11eba45accc0a410985fdbdc3ce31 +size 41830340 diff --git a/ckpts/llama-3b/global_step100/layer_29-model_01-model_states.pt b/ckpts/llama-3b/global_step100/layer_29-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..39954e0420eb9ceb0bfa0c134e5c6c7ca3112f70 --- /dev/null +++ b/ckpts/llama-3b/global_step100/layer_29-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:427f9304deb973b37942df0ff847d672c3d270e067578690a8456aab87159bbe +size 116407086 diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49d088214505b9604964ab142e7f8a5b38ccd5ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
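As context for the `ckpts/` entries above: each is a Git LFS pointer stub (a `version` line, a `sha256` object id, and a byte size) committed in place of the actual checkpoint tensors. A minimal sketch of how such a stub could be parsed; the helper name is hypothetical and not part of `huggingface_hub` or git-lfs:

```python
# Hypothetical helper: split the "key value" lines of a git-lfs pointer stub
# (spec: https://git-lfs.github.com/spec/v1) into a dict.
from typing import Dict


def parse_lfs_pointer(text: str) -> Dict[str, str]:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:179d1ed1c7e1c869578a9c215acadb6b3bdc32b9ca0b36904058c42f24ca0442
size 41830148"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # oid digest and size in bytes
```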
+ +from abc import ABC, abstractmethod +from argparse import _SubParsersAction + + +class BaseHuggingfaceCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: _SubParsersAction): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ab452c729af93b3f4546b6823249db0958e1a1a Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e28a4ab3ef815e5f5c6118d766e6bd3d91ac86c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7eafc2184f32cd617dc5d9cc5a67415b659da27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51795a2c70f0060b01d28d4b0c365a0fb3fed2cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b1fad13189221b8f66b4dfa8770a3a36382b112 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f423da8119bf3df0983c14bdeba560545a11a856 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38a9aa8f22970978cc6490903d7b181982414fa0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7853a89ae9370b9bf12573906369d2f0ff7fe91 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00900aab3b8245210d74ab3b6eee9cd634497e00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8686d6e81ab9e524268c9e0ad324580b1c5fe64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/_cli_utils.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/_cli_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7a1f8601618f07cbfb9782b30e85349818766a94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/_cli_utils.py @@ -0,0 +1,64 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Contains a utility for good-looking prints.""" + +import os +from typing import List, Union + + +class ANSI: + """ + Helper for en.wikipedia.org/wiki/ANSI_escape_code + """ + + _bold = "\u001b[1m" + _gray = "\u001b[90m" + _red = "\u001b[31m" + _reset = "\u001b[0m" + + @classmethod + def bold(cls, s: str) -> str: + return cls._format(s, cls._bold) + + @classmethod + def gray(cls, s: str) -> str: + return cls._format(s, cls._gray) + + @classmethod + def red(cls, s: str) -> str: + return cls._format(s, cls._bold + cls._red) + + @classmethod + def _format(cls, s: str, code: str) -> str: + if os.environ.get("NO_COLOR"): + # See https://no-color.org/ + return s + return f"{code}{s}{cls._reset}" + + +def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: + """ + Inspired by: + + - stackoverflow.com/a/8356620/593036 + - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data + """ + col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] + row_format = ("{{:{}}} " * len(headers)).format(*col_widths) + lines = [] + lines.append(row_format.format(*headers)) + lines.append(row_format.format(*["-" * w for w in col_widths])) + for row in rows: + lines.append(row_format.format(*row)) + return "\n".join(lines) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/delete_cache.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/delete_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..b2fc44d31c8f59e6c517dd6df71466a319e5ea0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/delete_cache.py @@ -0,0 +1,428 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to delete some revisions from the HF cache directory. + +Usage: + huggingface-cli delete-cache + huggingface-cli delete-cache --disable-tui + huggingface-cli delete-cache --dir ~/.cache/huggingface/hub + +NOTE: + This command is based on `InquirerPy` to build the multiselect menu in the terminal. + This dependency has to be installed with `pip install huggingface_hub[cli]`. Since + we want to avoid as much as possible cross-platform issues, I chose a library that + is built on top of `python-prompt-toolkit` which seems to be a reference in terminal + GUI (actively maintained on both Unix and Windows, 7.9k stars). + + For the moment, the TUI feature is in beta. + + See: + - https://github.com/kazhala/InquirerPy + - https://inquirerpy.readthedocs.io/en/latest/ + - https://github.com/prompt-toolkit/python-prompt-toolkit + + Other solutions could have been: + - `simple_term_menu`: would be good as well for our use case but some issues suggest + that Windows is less supported. + See: https://github.com/IngoMeyer441/simple-term-menu + - `PyInquirer`: very similar to `InquirerPy` but older and not maintained anymore. + In particular, no support of Python3.10. 
+ See: https://github.com/CITGuru/PyInquirer + - `pick` (or `pickpack`): easy to use and flexible but built on top of Python's + standard library `curses` that is specific to Unix (not implemented on Windows). + See https://github.com/wong2/pick and https://github.com/anafvana/pickpack. + - `inquirer`: lot of traction (700 stars) but explicitly states "experimental + support of Windows". Not built on top of `python-prompt-toolkit`. + See https://github.com/magmax/python-inquirer + +TODO: add support for `huggingface-cli delete-cache aaaaaa bbbbbb cccccc (...)` ? +TODO: add "--keep-last" arg to delete revisions that are not on `main` ref +TODO: add "--filter" arg to filter repositories by name ? +TODO: add "--sort" arg to sort by size ? +TODO: add "--limit" arg to limit to X repos ? +TODO: add "-y" arg for immediate deletion ? +See discussions in https://github.com/huggingface/huggingface_hub/issues/1025. +""" + +import os +from argparse import Namespace, _SubParsersAction +from functools import wraps +from tempfile import mkstemp +from typing import Any, Callable, Iterable, List, Optional, Union + +from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir +from . import BaseHuggingfaceCLICommand +from ._cli_utils import ANSI + + +try: + from InquirerPy import inquirer + from InquirerPy.base.control import Choice + from InquirerPy.separator import Separator + + _inquirer_py_available = True +except ImportError: + _inquirer_py_available = False + + +def require_inquirer_py(fn: Callable) -> Callable: + """Decorator to flag methods that require `InquirerPy`.""" + + # TODO: refactor this + imports in a unified pattern across codebase + @wraps(fn) + def _inner(*args, **kwargs): + if not _inquirer_py_available: + raise ImportError( + "The `delete-cache` command requires extra dependencies to work with" + " the TUI.\nPlease run `pip install huggingface_hub[cli]` to install" + " them.\nOtherwise, disable TUI using the `--disable-tui` flag." + ) + + return fn(*args, **kwargs) + + return _inner + + +# Possibility for the user to cancel deletion +_CANCEL_DELETION_STR = "CANCEL_DELETION" + + +class DeleteCacheCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + delete_cache_parser = parser.add_parser("delete-cache", help="Delete revisions from the cache directory.") + + delete_cache_parser.add_argument( + "--dir", + type=str, + default=None, + help="cache directory (optional). Default to the default HuggingFace cache.", + ) + + delete_cache_parser.add_argument( + "--disable-tui", + action="store_true", + help=( + "Disable Terminal User Interface (TUI) mode. Useful if your" + " platform/terminal doesn't support the multiselect menu." + ), + ) + + delete_cache_parser.set_defaults(func=DeleteCacheCommand) + + def __init__(self, args: Namespace) -> None: + self.cache_dir: Optional[str] = args.dir + self.disable_tui: bool = args.disable_tui + + def run(self): + """Run `delete-cache` command with or without TUI.""" + # Scan cache directory + hf_cache_info = scan_cache_dir(self.cache_dir) + + # Manual review from the user + if self.disable_tui: + selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[]) + else: + selected_hashes = _manual_review_tui(hf_cache_info, preselected=[]) + + # If deletion is not cancelled + if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes: + confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + " Confirm deletion ?" 
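At this point `run()` holds the list of selected revision hashes; the heavy lifting is done by the cache-scanning API imported at the top of this file (`scan_cache_dir`, `HFCacheInfo.delete_revisions`). A minimal non-interactive sketch of the same scan, select, delete pipeline; the one-week age cutoff is an illustrative assumption, not CLI behavior:

```python
# Sketch: free disk space by deleting cached revisions older than a week,
# using the same calls DeleteCacheCommand wraps. The age filter is an
# assumption for illustration only.
import time

from huggingface_hub import scan_cache_dir

ONE_WEEK = 7 * 24 * 3600
cache_info = scan_cache_dir()  # scans the default HF cache directory

old_hashes = [
    revision.commit_hash
    for repo in cache_info.repos
    for revision in repo.revisions
    if revision.last_modified < time.time() - ONE_WEEK
]

strategy = cache_info.delete_revisions(*old_hashes)
print(f"Would free {strategy.expected_freed_size_str}")
# strategy.execute()  # uncomment to actually delete
```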
+ + # Confirm deletion + if self.disable_tui: + confirmed = _ask_for_confirmation_no_tui(confirm_message) + else: + confirmed = _ask_for_confirmation_tui(confirm_message) + + # Deletion is confirmed + if confirmed: + strategy = hf_cache_info.delete_revisions(*selected_hashes) + print("Start deletion.") + strategy.execute() + print( + f"Done. Deleted {len(strategy.repos)} repo(s) and" + f" {len(strategy.snapshots)} revision(s) for a total of" + f" {strategy.expected_freed_size_str}." + ) + return + + # Deletion is cancelled + print("Deletion is cancelled. Do nothing.") + + +@require_inquirer_py +def _manual_review_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]: + """Ask the user for a manual review of the revisions to delete. + + Displays a multi-select menu in the terminal (TUI). + """ + # Define multiselect list + choices = _get_tui_choices_from_scan(repos=hf_cache_info.repos, preselected=preselected) + checkbox = inquirer.checkbox( + message="Select revisions to delete:", + choices=choices, # List of revisions with some pre-selection + cycle=False, # No loop between top and bottom + height=100, # Large list if possible + # We use the instruction to display to the user the expected effect of the + # deletion. + instruction=_get_expectations_str( + hf_cache_info, + selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled], + ), + # We use the long instruction to show keybinding instructions to the user + long_instruction="Press <space> to select, <enter> to validate and <ctrl+c> to quit without modification.", + # Message that is displayed once the user validates its selection. + transformer=lambda result: f"{len(result)} revision(s) selected.", + ) + + # Add a callback to update the information line when a revision is + # selected/unselected + def _update_expectations(_) -> None: + # Hacky way to dynamically set an instruction message to the checkbox when + # a revision hash is selected/unselected. + checkbox._instruction = _get_expectations_str( + hf_cache_info, + selected_hashes=[choice["value"] for choice in checkbox.content_control.choices if choice["enabled"]], + ) + + checkbox.kb_func_lookup["toggle"].append({"func": _update_expectations}) + + # Finally display the form to the user. + try: + return checkbox.execute() + except KeyboardInterrupt: + return [] # Quit without deletion + + +@require_inquirer_py +def _ask_for_confirmation_tui(message: str, default: bool = True) -> bool: + """Ask for confirmation using Inquirer.""" + return inquirer.confirm(message, default=default).execute() + + +def _get_tui_choices_from_scan(repos: Iterable[CachedRepoInfo], preselected: List[str]) -> List: + """Build a list of choices from the scanned repos. + + Args: + repos (*Iterable[`CachedRepoInfo`]*): + List of scanned repos on which we want to delete revisions. + preselected (*List[`str`]*): + List of revision hashes that will be preselected. + + Return: + The list of choices to pass to `inquirer.checkbox`. + """ + choices: List[Union[Choice, Separator]] = [] + + # First choice is to cancel the deletion. If selected, nothing will be deleted, + # no matter the other selected items.
+ choices.append( + Choice( + _CANCEL_DELETION_STR, + name="None of the following (if selected, nothing will be deleted).", + enabled=False, + ) + ) + + # Display a separator per repo and a Choice for each revisions of the repo + for repo in sorted(repos, key=_repo_sorting_order): + # Repo as separator + choices.append( + Separator( + f"\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}," + f" used {repo.last_accessed_str})" + ) + ) + for revision in sorted(repo.revisions, key=_revision_sorting_order): + # Revision as choice + choices.append( + Choice( + revision.commit_hash, + name=( + f"{revision.commit_hash[:8]}:" + f" {', '.join(sorted(revision.refs)) or '(detached)'} #" + f" modified {revision.last_modified_str}" + ), + enabled=revision.commit_hash in preselected, + ) + ) + + # Return choices + return choices + + +def _manual_review_no_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]: + """Ask the user for a manual review of the revisions to delete. + + Used when TUI is disabled. Manual review happens in a separate tmp file that the + user can manually edit. + """ + # 1. Generate temporary file with delete commands. + fd, tmp_path = mkstemp(suffix=".txt") # suffix to make it easier to find by editors + os.close(fd) + + lines = [] + for repo in sorted(hf_cache_info.repos, key=_repo_sorting_order): + lines.append( + f"\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}," + f" used {repo.last_accessed_str})" + ) + for revision in sorted(repo.revisions, key=_revision_sorting_order): + lines.append( + # Deselect by prepending a '#' + f"{'' if revision.commit_hash in preselected else '#'} " + f" {revision.commit_hash} # Refs:" + # Print `refs` as comment on same line + f" {', '.join(sorted(revision.refs)) or '(detached)'} # modified" + # Print `last_modified` as comment on same line + f" {revision.last_modified_str}" + ) + + with open(tmp_path, "w") as f: + f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS) + f.write("\n".join(lines)) + + # 2. Prompt instructions to user. + instructions = f""" + TUI is disabled. In order to select which revisions you want to delete, please edit + the following file using the text editor of your choice. Instructions for manual + editing are located at the beginning of the file. Edit the file, save it and confirm + to continue. + File to edit: {ANSI.bold(tmp_path)} + """ + print("\n".join(line.strip() for line in instructions.strip().split("\n"))) + + # 3. Wait for user confirmation. + while True: + selected_hashes = _read_manual_review_tmp_file(tmp_path) + if _ask_for_confirmation_no_tui( + _get_expectations_str(hf_cache_info, selected_hashes) + " Continue ?", + default=False, + ): + break + + # 4. Return selected_hashes + os.remove(tmp_path) + return selected_hashes + + +def _ask_for_confirmation_no_tui(message: str, default: bool = True) -> bool: + """Ask for confirmation using pure-python.""" + YES = ("y", "yes", "1") + NO = ("n", "no", "0") + DEFAULT = "" + ALL = YES + NO + (DEFAULT,) + full_message = message + (" (Y/n) " if default else " (y/N) ") + while True: + answer = input(full_message).lower() + if answer == DEFAULT: + return default + if answer in YES: + return True + if answer in NO: + return False + print(f"Invalid input. Must be one of {ALL}") + + +def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str: + """Format a string to display to the user how much space would be saved. 
+ + Example: + ``` + >>> _get_expectations_str(hf_cache_info, selected_hashes) + '7 revisions selected counting for 4.3G.' + ``` + """ + if _CANCEL_DELETION_STR in selected_hashes: + return "Nothing will be deleted." + strategy = hf_cache_info.delete_revisions(*selected_hashes) + return f"{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}." + + +def _read_manual_review_tmp_file(tmp_path: str) -> List[str]: + """Read the manually reviewed instruction file and return a list of revision hashes. + + Example: + ```txt + # This is the tmp file content + ### + + # Commented out line + 123456789 # revision hash + + # Something else + # a_newer_hash # 2 days ago + an_older_hash # 3 days ago + ``` + + ```py + >>> _read_manual_review_tmp_file(tmp_path) + ['123456789', 'an_older_hash'] + ``` + """ + with open(tmp_path) as f: + content = f.read() + + # Split lines + lines = [line.strip() for line in content.split("\n")] + + # Filter commented lines + selected_lines = [line for line in lines if not line.startswith("#")] + + # Select only before comment + selected_hashes = [line.split("#")[0].strip() for line in selected_lines] + + # Return revision hashes + return [hash for hash in selected_hashes if len(hash) > 0] + + +_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f""" +# INSTRUCTIONS +# ------------ +# This is a temporary file created by running `huggingface-cli delete-cache` with the +# `--disable-tui` option. It contains a set of revisions that can be deleted from your +# local cache directory. +# +# Please manually review the revisions you want to delete: +# - Revision hashes can be commented out with '#'. +# - Only non-commented revisions in this file will be deleted. +# - Revision hashes that are removed from this file are ignored as well. +# - If the `{_CANCEL_DELETION_STR}` line is uncommented, the whole cache deletion is cancelled and +# no changes will be applied. +# +# Once you've manually reviewed this file, please confirm deletion in the terminal. This +# file will be automatically removed once done. +# ------------ + +# KILL SWITCH +# ------------ +# Un-comment the following line to completely cancel the deletion process +# {_CANCEL_DELETION_STR} +# ------------ + +# REVISIONS +# ------------ +""".strip() + + +def _repo_sorting_order(repo: CachedRepoInfo) -> Any: + # First split by Dataset/Model, then sort by last accessed (oldest first) + return (repo.repo_type, repo.last_accessed) + + +def _revision_sorting_order(revision: CachedRevisionInfo) -> Any: + # Sort by last modified (oldest first) + return revision.last_modified diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/download.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/download.py new file mode 100644 index 0000000000000000000000000000000000000000..1de7738a14fa7dc9fc89577013532c44b50caf02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/download.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to download files from the Hub with the CLI. + +Usage: + huggingface-cli download --help + + # Download file + huggingface-cli download gpt2 config.json + + # Download entire repo + huggingface-cli download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78 + + # Download repo with filters + huggingface-cli download gpt2 --include="*.safetensors" + + # Download with token + huggingface-cli download Wauplin/private-model --token=hf_*** + + # Download quietly (no progress bar, no warnings, only the returned path) + huggingface-cli download gpt2 config.json --quiet + + # Download to local dir + huggingface-cli download gpt2 --local-dir=./models/gpt2 +""" + +import warnings +from argparse import Namespace, _SubParsersAction +from typing import List, Literal, Optional, Union + +from huggingface_hub import logging +from huggingface_hub._snapshot_download import snapshot_download +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER +from huggingface_hub.file_download import hf_hub_download +from huggingface_hub.utils import disable_progress_bars, enable_progress_bars + + +logger = logging.get_logger(__name__) + + +class DownloadCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + download_parser = parser.add_parser("download", help="Download files from the Hub") + download_parser.add_argument( + "repo_id", type=str, help="ID of the repo to download from (e.g. `username/repo-name`)." + ) + download_parser.add_argument( + "filenames", type=str, nargs="*", help="Files to download (e.g. `config.json`, `data/metadata.jsonl`)." + ) + download_parser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Type of repo to download from (defaults to 'model').", + ) + download_parser.add_argument( + "--revision", + type=str, + help="An optional Git revision id which can be a branch name, a tag, or a commit hash.", + ) + download_parser.add_argument( + "--include", nargs="*", type=str, help="Glob patterns to match files to download." + ) + download_parser.add_argument( + "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to download." + ) + download_parser.add_argument( + "--cache-dir", type=str, help="Path to the directory where to save the downloaded files." + ) + download_parser.add_argument( + "--local-dir", + type=str, + help=( + "If set, the downloaded file will be placed under this directory either as a symlink (default) or a" + " regular file. Check out" + " https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more" + " details." + ), + ) + download_parser.add_argument( + "--local-dir-use-symlinks", + choices=["auto", "True", "False"], + default="auto", + help=( + "To be used with `local_dir`. If set to 'auto', the cache directory will be used and the file will be" + " either duplicated or symlinked to the local directory depending on its size. If set to `True`, a" + " symlink will be created, no matter the file size. If set to `False`, the file will either be" + " duplicated from cache (if already exists) or downloaded from the Hub and not cached."
+ ), + ) + download_parser.add_argument( + "--force-download", + action="store_true", + help="If True, the files will be downloaded even if they are already cached.", + ) + download_parser.add_argument( + "--resume-download", action="store_true", help="If True, resume a previously interrupted download." + ) + download_parser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens" + ) + download_parser.add_argument( + "--quiet", + action="store_true", + help="If True, progress bars are disabled and only the path to the downloaded files is printed.", + ) + download_parser.set_defaults(func=DownloadCommand) + + def __init__(self, args: Namespace) -> None: + self.token = args.token + self.repo_id: str = args.repo_id + self.filenames: List[str] = args.filenames + self.repo_type: str = args.repo_type + self.revision: Optional[str] = args.revision + self.include: Optional[List[str]] = args.include + self.exclude: Optional[List[str]] = args.exclude + self.cache_dir: Optional[str] = args.cache_dir + self.local_dir: Optional[str] = args.local_dir + self.force_download: bool = args.force_download + self.resume_download: bool = args.resume_download + self.quiet: bool = args.quiet + + # Raise if local_dir_use_symlinks is invalid + self.local_dir_use_symlinks: Union[Literal["auto"], bool] + use_symlinks_lowercase = args.local_dir_use_symlinks.lower() + if use_symlinks_lowercase == "true": + self.local_dir_use_symlinks = True + elif use_symlinks_lowercase == "false": + self.local_dir_use_symlinks = False + elif use_symlinks_lowercase == "auto": + self.local_dir_use_symlinks = "auto" + else: + raise ValueError( + f"'{args.local_dir_use_symlinks}' is not a valid value for `local_dir_use_symlinks`. It must be either" + " 'auto', 'True' or 'False'." + ) + + def run(self) -> None: + if self.quiet: + disable_progress_bars() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + print(self._download()) # Print path to downloaded files + enable_progress_bars() + else: + logging.set_verbosity_info() + print(self._download()) # Print path to downloaded files + logging.set_verbosity_warning() + + def _download(self) -> str: + # Warn user if patterns are ignored + if len(self.filenames) > 0: + if self.include is not None and len(self.include) > 0: + warnings.warn("Ignoring `--include` since filenames have been explicitly set.") + if self.exclude is not None and len(self.exclude) > 0: + warnings.warn("Ignoring `--exclude` since filenames have been explicitly set.") + + if not HF_HUB_ENABLE_HF_TRANSFER: + logger.info( + "Consider using `hf_transfer` for faster downloads. This solution comes with some limitations. See" + " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details."
+ ) + + # Single file to download: use `hf_hub_download` + if len(self.filenames) == 1: + return hf_hub_download( + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + filename=self.filenames[0], + cache_dir=self.cache_dir, + resume_download=self.resume_download, + force_download=self.force_download, + token=self.token, + local_dir=self.local_dir, + local_dir_use_symlinks=self.local_dir_use_symlinks, + library_name="huggingface-cli", + ) + + # Otherwise: use `snapshot_download` to ensure all files come from the same revision + elif len(self.filenames) == 0: + allow_patterns = self.include + ignore_patterns = self.exclude + else: + allow_patterns = self.filenames + ignore_patterns = None + + return snapshot_download( + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + resume_download=self.resume_download, + force_download=self.force_download, + cache_dir=self.cache_dir, + token=self.token, + local_dir=self.local_dir, + local_dir_use_symlinks=self.local_dir_use_symlinks, + library_name="huggingface-cli", + ) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/env.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..23f2828bbfebda0a633b4b3c6883432e4a534c79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/env.py @@ -0,0 +1,36 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to print information about the environment. + +Usage: + huggingface-cli env +""" + +from argparse import _SubParsersAction + +from ..utils import dump_environment_info +from . import BaseHuggingfaceCLICommand + + +class EnvironmentCommand(BaseHuggingfaceCLICommand): + def __init__(self, args): + self.args = args + + @staticmethod + def register_subcommand(parser: _SubParsersAction): + env_parser = parser.add_parser("env", help="Print information about the environment.") + env_parser.set_defaults(func=EnvironmentCommand) + + def run(self) -> None: + dump_environment_info() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..39b6dfe49ab681f80dea6751e473843e5f685ff3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import ArgumentParser + +from huggingface_hub.commands.delete_cache import DeleteCacheCommand +from huggingface_hub.commands.download import DownloadCommand +from huggingface_hub.commands.env import EnvironmentCommand +from huggingface_hub.commands.lfs import LfsCommands +from huggingface_hub.commands.scan_cache import ScanCacheCommand +from huggingface_hub.commands.upload import UploadCommand +from huggingface_hub.commands.user import UserCommands + + +def main(): + parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]") + commands_parser = parser.add_subparsers(help="huggingface-cli command helpers") + + # Register commands + EnvironmentCommand.register_subcommand(commands_parser) + UserCommands.register_subcommand(commands_parser) + UploadCommand.register_subcommand(commands_parser) + DownloadCommand.register_subcommand(commands_parser) + LfsCommands.register_subcommand(commands_parser) + ScanCacheCommand.register_subcommand(commands_parser) + DeleteCacheCommand.register_subcommand(commands_parser) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + service = args.func(args) + service.run() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/lfs.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/lfs.py new file mode 100644 index 0000000000000000000000000000000000000000..4dbf3cf55c67beebf4e6959ef180b30e29341a7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/lfs.py @@ -0,0 +1,199 @@ +""" +Implementation of a custom transfer agent for the transfer type "multipart" for +git-lfs. + +Inspired by: +github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py + +Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md + + +To launch debugger while developing: + +``` [lfs "customtransfer.multipart"] +path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678 +--wait-for-client +/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py +lfs-multipart-upload ```""" + +import json +import os +import subprocess +import sys +from argparse import _SubParsersAction +from typing import Dict, List, Optional + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj + +from ..utils import get_session, hf_raise_for_status, logging + + +logger = logging.get_logger(__name__) + + +class LfsCommands(BaseHuggingfaceCLICommand): + """ + Implementation of a custom transfer agent for the transfer type "multipart" + for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom + transfer agent is: + https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md + + This introduces two commands to the CLI: + + 1. $ huggingface-cli lfs-enable-largefiles + + This should be executed once for each model repo that contains a model file + >5GB.
It's documented in the error message you get if you just try to git + push a 5GB file without having enabled it before. + + 2. $ huggingface-cli lfs-multipart-upload + + This command is called by lfs directly and is not meant to be called by the + user. + """ + + @staticmethod + def register_subcommand(parser: _SubParsersAction): + enable_parser = parser.add_parser( + "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB." + ) + enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") + enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) + + # Command will get called by git-lfs, do not call it directly. + upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False) + upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) + + +class LfsEnableCommand: + def __init__(self, args): + self.args = args + + def run(self): + local_path = os.path.abspath(self.args.path) + if not os.path.isdir(local_path): + print("This does not look like a valid git repo.") + exit(1) + subprocess.run( + "git config lfs.customtransfer.multipart.path huggingface-cli".split(), + check=True, + cwd=local_path, + ) + subprocess.run( + f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), + check=True, + cwd=local_path, + ) + print("Local repo set up for largefiles") + + +def write_msg(msg: Dict): + """Write out the message in Line delimited JSON.""" + msg_str = json.dumps(msg) + "\n" + sys.stdout.write(msg_str) + sys.stdout.flush() + + +def read_msg() -> Optional[Dict]: + """Read Line delimited JSON from stdin.""" + msg = json.loads(sys.stdin.readline().strip()) + + if "terminate" in (msg.get("type"), msg.get("event")): + # terminate message received + return None + + if msg.get("event") not in ("download", "upload"): + logger.critical("Received unexpected message") + sys.exit(1) + + return msg + + +class LfsUploadCommand: + def __init__(self, args) -> None: + self.args = args + + def run(self) -> None: + # Immediately after invoking a custom transfer process, git-lfs + # sends initiation data to the process over stdin. + # This tells the process useful information about the configuration. + init_msg = json.loads(sys.stdin.readline().strip()) + if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): + write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) + sys.exit(1) + + # The transfer process should use the information it needs from the + # initiation structure, and also perform any one-off setup tasks it + # needs to do. It should then respond on stdout with a simple empty + # confirmation structure, as follows: + write_msg({}) + + # After the initiation exchange, git-lfs will send any number of + # transfer requests to the stdin of the transfer process, in a serial sequence. + while True: + msg = read_msg() + if msg is None: + # When all transfers have been processed, git-lfs will send + # a terminate event to the stdin of the transfer process. + # On receiving this message the transfer process should + # clean up and terminate. No response is expected. + sys.exit(0) + + oid = msg["oid"] + filepath = msg["path"] + completion_url = msg["action"]["href"] + header = msg["action"]["header"] + chunk_size = int(header.pop("chunk_size")) + presigned_urls: List[str] = list(header.values()) + + # Send a "started" progress event to allow other workers to start. 
+ # Otherwise they're delayed until first "progress" event is reported, + # i.e. after the first 5GB by default (!) + write_msg( + { + "event": "progress", + "oid": oid, + "bytesSoFar": 1, + "bytesSinceLast": 0, + } + ) + + parts = [] + with open(filepath, "rb") as file: + for i, presigned_url in enumerate(presigned_urls): + with SliceFileObj( + file, + seek_from=i * chunk_size, + read_limit=chunk_size, + ) as data: + r = get_session().put(presigned_url, data=data) + hf_raise_for_status(r) + parts.append( + { + "etag": r.headers.get("etag"), + "partNumber": i + 1, + } + ) + # In order to support progress reporting while data is uploading / downloading, + # the transfer process should post messages to stdout + write_msg( + { + "event": "progress", + "oid": oid, + "bytesSoFar": (i + 1) * chunk_size, + "bytesSinceLast": chunk_size, + } + ) + # Not precise but that's ok. + + r = get_session().post( + completion_url, + json={ + "oid": oid, + "parts": parts, + }, + ) + hf_raise_for_status(r) + + write_msg({"event": "complete", "oid": oid}) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/scan_cache.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/scan_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ab3399be86799fe2cd79f1feb515994a0f479f --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/scan_cache.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to scan the HF cache directory. + +Usage: + huggingface-cli scan-cache + huggingface-cli scan-cache -v + huggingface-cli scan-cache -vvv + huggingface-cli scan-cache --dir ~/.cache/huggingface/hub +""" + +import time +from argparse import Namespace, _SubParsersAction +from typing import Optional + +from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir +from . import BaseHuggingfaceCLICommand +from ._cli_utils import ANSI, tabulate + + +class ScanCacheCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.") + + scan_cache_parser.add_argument( + "--dir", + type=str, + default=None, + help="cache directory to scan (optional). Default to the default HuggingFace cache.", + ) + scan_cache_parser.add_argument( + "-v", + "--verbose", + action="count", + default=0, + help="show a more verbose output", + ) + scan_cache_parser.set_defaults(func=ScanCacheCommand) + + def __init__(self, args: Namespace) -> None: + self.verbosity: int = args.verbose + self.cache_dir: Optional[str] = args.dir + + def run(self): + try: + t0 = time.time() + hf_cache_info = scan_cache_dir(self.cache_dir) + t1 = time.time() + except CacheNotFound as exc: + cache_dir = exc.cache_dir + print(f"Cache directory not found: {cache_dir}") + return + + self._print_hf_cache_info_as_table(hf_cache_info) + + print( + f"\nDone in {round(t1-t0,1)}s. 
Scanned {len(hf_cache_info.repos)} repo(s)" + f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}." + ) + if len(hf_cache_info.warnings) > 0: + message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning." + if self.verbosity >= 3: + print(ANSI.gray(message)) + for warning in hf_cache_info.warnings: + print(ANSI.gray(warning)) + else: + print(ANSI.gray(message + " Use -vvv to print details.")) + + def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None: + if self.verbosity == 0: + print( + tabulate( + rows=[ + [ + repo.repo_id, + repo.repo_type, + "{:>12}".format(repo.size_on_disk_str), + repo.nb_files, + repo.last_accessed_str, + repo.last_modified_str, + ", ".join(sorted(repo.refs)), + str(repo.repo_path), + ] + for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) + ], + headers=[ + "REPO ID", + "REPO TYPE", + "SIZE ON DISK", + "NB FILES", + "LAST_ACCESSED", + "LAST_MODIFIED", + "REFS", + "LOCAL PATH", + ], + ) + ) + else: + print( + tabulate( + rows=[ + [ + repo.repo_id, + repo.repo_type, + revision.commit_hash, + "{:>12}".format(revision.size_on_disk_str), + revision.nb_files, + revision.last_modified_str, + ", ".join(sorted(revision.refs)), + str(revision.snapshot_path), + ] + for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) + for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash) + ], + headers=[ + "REPO ID", + "REPO TYPE", + "REVISION", + "SIZE ON DISK", + "NB FILES", + "LAST_MODIFIED", + "REFS", + "LOCAL PATH", + ], + ) + ) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/upload.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/upload.py new file mode 100644 index 0000000000000000000000000000000000000000..a42dd0f8824d067b1c7952080a075c6f67cee7c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/upload.py @@ -0,0 +1,298 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to upload a repo or file with the CLI. + +Usage: + # Upload file (implicit) + huggingface-cli upload my-cool-model ./my-cool-model.safetensors + + # Upload file (explicit) + huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors + + # Upload directory (implicit). If `my-cool-model/` is a directory it will be uploaded, otherwise an exception is raised. + huggingface-cli upload my-cool-model + + # Upload directory (explicit) + huggingface-cli upload my-cool-model ./models/my-cool-model . + + # Upload filtered directory (example: tensorboard logs except for the last run) + huggingface-cli upload my-cool-model ./model/training /logs --include "*.tfevents.*" --exclude "*20230905*" + + # Upload private dataset + huggingface-cli upload Wauplin/my-cool-dataset ./data . 
--repo-type=dataset --private + + # Upload with token + huggingface-cli upload Wauplin/my-cool-model --token=hf_**** + + # Sync local Space with Hub (upload new files, delete removed files) + huggingface-cli upload Wauplin/space-example --repo-type=space --exclude="/logs/*" --delete="*" --commit-message="Sync local Space with Hub" + + # Schedule commits every 30 minutes + huggingface-cli upload Wauplin/my-cool-model --every=30 +""" + +import os +import time +import warnings +from argparse import Namespace, _SubParsersAction +from typing import List, Optional + +from huggingface_hub import logging +from huggingface_hub._commit_scheduler import CommitScheduler +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER +from huggingface_hub.hf_api import HfApi +from huggingface_hub.utils import RevisionNotFoundError, disable_progress_bars, enable_progress_bars + + +logger = logging.get_logger(__name__) + + +class UploadCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + upload_parser = parser.add_parser("upload", help="Upload a file or a folder to a repo on the Hub") + upload_parser.add_argument( + "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)." + ) + upload_parser.add_argument( + "local_path", nargs="?", help="Local path to the file or folder to upload. Defaults to current directory." + ) + upload_parser.add_argument( + "path_in_repo", + nargs="?", + help="Path of the file or folder in the repo. Defaults to the relative path of the file or folder.", + ) + upload_parser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Type of the repo to upload to (e.g. `dataset`).", + ) + upload_parser.add_argument( + "--revision", + type=str, + help=( + "An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not" + " exist and `--create-pr` is not set, a branch will be automatically created." + ), + ) + upload_parser.add_argument( + "--private", + action="store_true", + help=( + "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already" + " exists." + ), + ) + upload_parser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.") + upload_parser.add_argument( + "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload." + ) + upload_parser.add_argument( + "--delete", + nargs="*", + type=str, + help="Glob patterns for file to be deleted from the repo while committing.", + ) + upload_parser.add_argument( + "--commit-message", type=str, help="The summary / title / first line of the generated commit." + ) + upload_parser.add_argument("--commit-description", type=str, help="The description of the generated commit.") + upload_parser.add_argument( + "--create-pr", action="store_true", help="Whether to upload content as a new Pull Request." 
+ ) + upload_parser.add_argument( + "--every", + type=float, + help="If set, a background job is scheduled to create commits every `every` minutes.", + ) + upload_parser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens" + ) + upload_parser.add_argument( + "--quiet", + action="store_true", + help="If True, progress bars are disabled and only the path to the uploaded files is printed.", + ) + upload_parser.set_defaults(func=UploadCommand) + + def __init__(self, args: Namespace) -> None: + self.repo_id: str = args.repo_id + self.repo_type: Optional[str] = args.repo_type + self.revision: Optional[str] = args.revision + self.private: bool = args.private + + self.include: Optional[List[str]] = args.include + self.exclude: Optional[List[str]] = args.exclude + self.delete: Optional[List[str]] = args.delete + + self.commit_message: Optional[str] = args.commit_message + self.commit_description: Optional[str] = args.commit_description + self.create_pr: bool = args.create_pr + self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli") + self.quiet: bool = args.quiet # disable warnings and progress bars + + # Check `--every` is valid + if args.every is not None and args.every <= 0: + raise ValueError(f"`every` must be a positive value (got '{args.every}')") + self.every: Optional[float] = args.every + + # Resolve `local_path` and `path_in_repo` + repo_name: str = args.repo_id.split("/")[-1] # e.g. "Wauplin/my-cool-model" => "my-cool-model" + self.local_path: str + self.path_in_repo: str + if args.local_path is None and os.path.isfile(repo_name): + # Implicit case 1: user provided only a repo_id which happens to be a local file as well => upload it with same name + self.local_path = repo_name + self.path_in_repo = repo_name + elif args.local_path is None and os.path.isdir(repo_name): + # Implicit case 2: user provided only a repo_id which happens to be a local folder as well => upload it at root + self.local_path = repo_name + self.path_in_repo = "." + elif args.local_path is None: + # Implicit case 3: user provided only a repo_id that does not match a local file or folder + # => the user must explicitly provide a local_path => raise exception + raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.") + elif args.path_in_repo is None and os.path.isfile(args.local_path): + # Explicit local path to file, no path in repo => upload it at root with same name + self.local_path = args.local_path + self.path_in_repo = os.path.basename(args.local_path) + elif args.path_in_repo is None: + # Explicit local path to folder, no path in repo => upload at root + self.local_path = args.local_path + self.path_in_repo = "."
+ else: + # Finally, if both paths are explicit + self.local_path = args.local_path + self.path_in_repo = args.path_in_repo + + def run(self) -> None: + if self.quiet: + disable_progress_bars() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + print(self._upload()) + enable_progress_bars() + else: + logging.set_verbosity_info() + print(self._upload()) + logging.set_verbosity_warning() + + def _upload(self) -> str: + if os.path.isfile(self.local_path): + if self.include is not None and len(self.include) > 0: + warnings.warn("Ignoring `--include` since a single file is uploaded.") + if self.exclude is not None and len(self.exclude) > 0: + warnings.warn("Ignoring `--exclude` since a single file is uploaded.") + if self.delete is not None and len(self.delete) > 0: + warnings.warn("Ignoring `--delete` since a single file is uploaded.") + + if not HF_HUB_ENABLE_HF_TRANSFER: + logger.info( + "Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See" + " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details." + ) + + # Schedule commits if `every` is set + if self.every is not None: + if os.path.isfile(self.local_path): + # If file => watch entire folder + use allow_patterns + folder_path = os.path.dirname(self.local_path) + path_in_repo = ( + self.path_in_repo[: -len(self.local_path)] # remove filename from path_in_repo + if self.path_in_repo.endswith(self.local_path) + else self.path_in_repo + ) + allow_patterns = [self.local_path] + ignore_patterns = [] + else: + folder_path = self.local_path + path_in_repo = self.path_in_repo + allow_patterns = self.include or [] + ignore_patterns = self.exclude or [] + if self.delete is not None and len(self.delete) > 0: + warnings.warn("Ignoring `--delete` when uploading with scheduled commits.") + + scheduler = CommitScheduler( + folder_path=folder_path, + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + path_in_repo=path_in_repo, + private=self.private, + every=self.every, + hf_api=self.api, + ) + print(f"Scheduling commits every {self.every} minutes to {scheduler.repo_id}.") + try: # Block main thread until KeyboardInterrupt + while True: + time.sleep(100) + except KeyboardInterrupt: + scheduler.stop() + return "Stopped scheduled commits." + + # Otherwise, create repo and proceed with the upload + if not os.path.isfile(self.local_path) and not os.path.isdir(self.local_path): + raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.") + repo_id = self.api.create_repo( + repo_id=self.repo_id, + repo_type=self.repo_type, + exist_ok=True, + private=self.private, + space_sdk="gradio" if self.repo_type == "space" else None, + # ^ We don't want it to fail when uploading to a Space => let's set Gradio by default. + # ^ I'd rather not add CLI args to set it explicitly as we already have `huggingface-cli repo create` for that. + ).repo_id + + # Check if branch already exists and if not, create it + if self.revision is not None and not self.create_pr: + try: + self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision) + except RevisionNotFoundError: + logger.info(f"Branch '{self.revision}' not found. 
Creating it...") + self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True) + # ^ `exist_ok=True` to avoid race concurrency issues + + # File-based upload + if os.path.isfile(self.local_path): + return self.api.upload_file( + path_or_fileobj=self.local_path, + path_in_repo=self.path_in_repo, + repo_id=repo_id, + repo_type=self.repo_type, + revision=self.revision, + commit_message=self.commit_message, + commit_description=self.commit_description, + create_pr=self.create_pr, + ) + + # Folder-based upload + else: + return self.api.upload_folder( + folder_path=self.local_path, + path_in_repo=self.path_in_repo, + repo_id=repo_id, + repo_type=self.repo_type, + revision=self.revision, + commit_message=self.commit_message, + commit_description=self.commit_description, + create_pr=self.create_pr, + allow_patterns=self.include, + ignore_patterns=self.exclude, + delete_patterns=self.delete, + ) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/commands/user.py b/venv/lib/python3.10/site-packages/huggingface_hub/commands/user.py new file mode 100644 index 0000000000000000000000000000000000000000..8cde3ac04c8a0773cad0a767a8d14b36dce4ee2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/commands/user.py @@ -0,0 +1,188 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import subprocess +from argparse import _SubParsersAction + +from requests.exceptions import HTTPError + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import ( + ENDPOINT, + REPO_TYPES, + REPO_TYPES_URL_PREFIXES, + SPACES_SDK_TYPES, +) +from huggingface_hub.hf_api import HfApi + +from .._login import ( # noqa: F401 # for backward compatibility + NOTEBOOK_LOGIN_PASSWORD_HTML, + NOTEBOOK_LOGIN_TOKEN_HTML_END, + NOTEBOOK_LOGIN_TOKEN_HTML_START, + login, + logout, + notebook_login, +) +from ..utils import get_token +from ._cli_utils import ANSI + + +class UserCommands(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens") + login_parser.add_argument( + "--token", + type=str, + help="Token generated from https://huggingface.co/settings/tokens", + ) + login_parser.add_argument( + "--add-to-git-credential", + action="store_true", + help="Optional: Save token to git credential helper.", + ) + login_parser.set_defaults(func=lambda args: LoginCommand(args)) + whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.") + whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) + logout_parser = parser.add_parser("logout", help="Log out") + logout_parser.set_defaults(func=lambda args: LogoutCommand(args)) + + # new system: git-based repo system + repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.") + repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands") + repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co") + repo_create_parser.add_argument( + "name", + type=str, + help="Name for your repo. Will be namespaced under your username to build the repo id.", + ) + repo_create_parser.add_argument( + "--type", + type=str, + help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.', + ) + repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.") + repo_create_parser.add_argument( + "--space_sdk", + type=str, + help='Optional: Hugging Face Spaces SDK type. 
Required when --type is set to "space".', + choices=SPACES_SDK_TYPES, + ) + repo_create_parser.add_argument( + "-y", + "--yes", + action="store_true", + help="Optional: answer Yes to the prompt", + ) + repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args)) + + +class BaseUserCommand: + def __init__(self, args): + self.args = args + self._api = HfApi() + + +class LoginCommand(BaseUserCommand): + def run(self): + login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential) + + +class LogoutCommand(BaseUserCommand): + def run(self): + logout() + + +class WhoamiCommand(BaseUserCommand): + def run(self): + token = get_token() + if token is None: + print("Not logged in") + exit() + try: + info = self._api.whoami(token) + print(info["name"]) + orgs = [org["name"] for org in info["orgs"]] + if orgs: + print(ANSI.bold("orgs: "), ",".join(orgs)) + + if ENDPOINT != "https://huggingface.co": + print(f"Authenticated through private endpoint: {ENDPOINT}") + except HTTPError as e: + print(e) + print(ANSI.red(e.response.text)) + exit(1) + + +class RepoCreateCommand(BaseUserCommand): + def run(self): + token = get_token() + if token is None: + print("Not logged in") + exit(1) + try: + stdout = subprocess.check_output(["git", "--version"]).decode("utf-8") + print(ANSI.gray(stdout.strip())) + except FileNotFoundError: + print("Looks like you do not have git installed. Please install it.") + + try: + stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8") + print(ANSI.gray(stdout.strip())) + except FileNotFoundError: + print( + ANSI.red( + "Looks like you do not have git-lfs installed. Please install it." + " You can install from https://git-lfs.github.com/." + " Then run `git lfs install` (you only have to do this once)." + ) + ) + print("") + + user = self._api.whoami(token)["name"] + namespace = self.args.organization if self.args.organization is not None else user + + repo_id = f"{namespace}/{self.args.name}" + + if self.args.type not in REPO_TYPES: + print("Invalid repo --type") + exit(1) + + if self.args.type in REPO_TYPES_URL_PREFIXES: + prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id + else: + prefixed_repo_id = repo_id + + print(f"You are about to create {ANSI.bold(prefixed_repo_id)}") + + if not self.args.yes: + choice = input("Proceed? [Y/n] ").lower() + if not (choice == "" or choice == "y" or choice == "yes"): + print("Abort") + exit() + try: + url = self._api.create_repo( + repo_id=repo_id, + token=token, + repo_type=self.args.type, + space_sdk=self.args.space_sdk, + ) + except HTTPError as e: + print(e) + print(ANSI.red(e.response.text)) + exit(1) + print("\nYour repo now lives at:") + print(f" {ANSI.bold(url)}") + print("\nYou can clone it locally with the command below, and commit/push as usual.") + print(f"\n git clone {url}") + print("") diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ece102c4d7a7d1a6c5ee46e066a0a3b867e342e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py @@ -0,0 +1,115 @@ +# This file is auto-generated by `utils/generate_inference_types.py`. +# Do not modify it manually. 
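The user commands above are thin wrappers as well; `login`, `logout` and `whoami` map onto the public helpers. A minimal sketch (the token value is a placeholder):

from huggingface_hub import HfApi, login

login(token="hf_xxx", add_to_git_credential=False)  # "hf_xxx" is a placeholder token
print(HfApi().whoami()["name"])  # the same call WhoamiCommand performs with the stored token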
+# +# ruff: noqa: F401 + +from .audio_classification import ( + AudioClassificationInput, + AudioClassificationOutputElement, + AudioClassificationParameters, +) +from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement +from .automatic_speech_recognition import ( + AutomaticSpeechRecognitionGenerationParameters, + AutomaticSpeechRecognitionInput, + AutomaticSpeechRecognitionOutput, + AutomaticSpeechRecognitionOutputChunk, + AutomaticSpeechRecognitionParameters, +) +from .base import BaseInferenceType +from .chat_completion import ( + ChatCompletionInput, + ChatCompletionInputMessage, + ChatCompletionOutput, + ChatCompletionOutputChoice, + ChatCompletionOutputChoiceMessage, + ChatCompletionStreamOutput, + ChatCompletionStreamOutputChoice, + ChatCompletionStreamOutputDelta, +) +from .depth_estimation import DepthEstimationInput, DepthEstimationOutput +from .document_question_answering import ( + DocumentQuestionAnsweringInput, + DocumentQuestionAnsweringInputData, + DocumentQuestionAnsweringOutputElement, + DocumentQuestionAnsweringParameters, +) +from .feature_extraction import FeatureExtractionInput +from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters +from .image_classification import ( + ImageClassificationInput, + ImageClassificationOutputElement, + ImageClassificationParameters, +) +from .image_segmentation import ImageSegmentationInput, ImageSegmentationOutputElement, ImageSegmentationParameters +from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize +from .image_to_text import ImageToTextGenerationParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters +from .object_detection import ( + ObjectDetectionBoundingBox, + ObjectDetectionInput, + ObjectDetectionOutputElement, + ObjectDetectionParameters, +) +from .question_answering import ( + QuestionAnsweringInput, + QuestionAnsweringInputData, + QuestionAnsweringOutputElement, + QuestionAnsweringParameters, +) +from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData +from .summarization import SummarizationGenerationParameters, SummarizationInput, SummarizationOutput +from .table_question_answering import ( + TableQuestionAnsweringInput, + TableQuestionAnsweringInputData, + TableQuestionAnsweringOutputElement, +) +from .text2text_generation import Text2TextGenerationInput, Text2TextGenerationOutput, Text2TextGenerationParameters +from .text_classification import TextClassificationInput, TextClassificationOutputElement, TextClassificationParameters +from .text_generation import ( + TextGenerationInput, + TextGenerationOutput, + TextGenerationOutputDetails, + TextGenerationOutputSequenceDetails, + TextGenerationOutputToken, + TextGenerationParameters, + TextGenerationPrefillToken, + TextGenerationStreamDetails, + TextGenerationStreamOutput, +) +from .text_to_audio import TextToAudioGenerationParameters, TextToAudioInput, TextToAudioOutput, TextToAudioParameters +from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters, TextToImageTargetSize +from .token_classification import ( + TokenClassificationInput, + TokenClassificationOutputElement, + TokenClassificationParameters, +) +from .translation import TranslationGenerationParameters, TranslationInput, TranslationOutput +from .video_classification import ( + VideoClassificationInput, + VideoClassificationOutputElement, + VideoClassificationParameters, +) +from .visual_question_answering import ( + 
VisualQuestionAnsweringInput, + VisualQuestionAnsweringInputData, + VisualQuestionAnsweringOutputElement, + VisualQuestionAnsweringParameters, +) +from .zero_shot_classification import ( + ZeroShotClassificationInput, + ZeroShotClassificationInputData, + ZeroShotClassificationOutputElement, + ZeroShotClassificationParameters, +) +from .zero_shot_image_classification import ( + ZeroShotImageClassificationInput, + ZeroShotImageClassificationInputData, + ZeroShotImageClassificationOutputElement, + ZeroShotImageClassificationParameters, +) +from .zero_shot_object_detection import ( + ZeroShotObjectDetectionBoundingBox, + ZeroShotObjectDetectionInput, + ZeroShotObjectDetectionInputData, + ZeroShotObjectDetectionOutputElement, +) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb7bac1d546d8d3848ee999f889c009c6c621acd Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..424ea6e9e67531c8455874d1dbb92a6a06b33d5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b724f2ee2b90189f0f471cc9b58cb01dc43f96ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78620220a4f992ea6cb8150249ff07bb7b4a3510 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11a4eebd0227c5957343087b7dc15758b44758ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b3c69b23b77758c3df44b600794421206c2999b Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5192672d622b8e27407a6018012c926552e2747d Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d63800cba041253a809206bce700c717579f9b27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae7611d38b3818d32f00979a2d6d09169bf4cc16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09641139423c8ab3b69896b25165b39508d25ea7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..479baa97543bd7ce0ca7a0bef749a49fc6586c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c11a7a391f038f5b0fc944579b06095e3824a579 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1830d0c5d47467cf9696c0f7ba4331e674d3c8c Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe74c88ac8a203fcc49194e5d3653a1c831901e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3940f49943add45cae47846141bf5c430376632c Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcdbf02a3eee510887ab16c4429c6a95b7c6881e Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a78b22c85fdc4e12a1c9f79d7ab81acce3418c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03336b64261a2259d41a964ea137260f82cdeda3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a597e849e7f8eab8e2cb782ed1d180e03cbb7fb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c050ce6480d223b9f393abc1b8ccb6dbc39dd337 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ebd7dd3565b37bec584696a10e2905574600d89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86fe52212b5f8efd72b3ae5796d214ed214cf4f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0db991e0225a0052987d830bc1a154369918884 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de2cc5a5662e9e496823014804e00da70c70b4d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85cd920edace67b673f031da5f9d26363d9805bd Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54e4fb0c3dd484ded662741411c29347b8c42884 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f817d42df542d40e64305b826d5ce020bca68ffb Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeba19382524c7a15bf971667883c6014b836515 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8dd3cf8edb09bfa409deeff362efbfa34a203a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea6168944b517d3be05105c817c512ad3ad77285 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3bc80b8a22c5fce44d1bdd37771baa3df987592 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py 
b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..914ba44960b5edca2f182bd1c3f15e9f01bce3b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py @@ -0,0 +1,43 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Literal, Optional + +from .base import BaseInferenceType + + +ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] + + +@dataclass +class AudioClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Audio Classification + """ + + function_to_apply: Optional["ClassificationOutputTransform"] = None + top_k: Optional[int] = None + """When specified, limits the output to the top K most probable classes.""" + + +@dataclass +class AudioClassificationInput(BaseInferenceType): + """Inputs for Audio Classification inference""" + + inputs: Any + """The input audio data""" + parameters: Optional[AudioClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class AudioClassificationOutputElement(BaseInferenceType): + """Outputs for Audio Classification inference""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..24a5238ab6b33ea13df79a1ea197b4f07b39c1ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py @@ -0,0 +1,116 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Literal, Optional, Union + +from .base import BaseInferenceType + + +EarlyStoppingEnum = Literal["never"] + + +@dataclass +class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType): + """Parametrization of the text generation process + Ad-hoc parametrization of the text generation process + """ + + do_sample: Optional[bool] = None + """Whether to use sampling instead of greedy decoding when generating new tokens.""" + early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None + """Controls the stopping condition for beam-based methods.""" + epsilon_cutoff: Optional[float] = None + """If set to float strictly between 0 and 1, only tokens with a conditional probability + greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + Model Desmoothing](https://hf.co/papers/2210.15191) for more details. 
+ """ + eta_cutoff: Optional[float] = None + """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + float strictly between 0 and 1, a token is only considered if it is greater than either + eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + for more details. + """ + max_length: Optional[int] = None + """The maximum length (in tokens) of the generated text, including the input.""" + max_new_tokens: Optional[int] = None + """The maximum number of tokens to generate. Takes precedence over maxLength.""" + min_length: Optional[int] = None + """The minimum length (in tokens) of the generated text, including the input.""" + min_new_tokens: Optional[int] = None + """The minimum number of tokens to generate. Takes precedence over maxLength.""" + num_beam_groups: Optional[int] = None + """Number of groups to divide num_beams into in order to ensure diversity among different + groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + """ + num_beams: Optional[int] = None + """Number of beams to use for beam search.""" + penalty_alpha: Optional[float] = None + """The value balances the model confidence and the degeneration penalty in contrastive + search decoding. + """ + temperature: Optional[float] = None + """The value used to modulate the next token probabilities.""" + top_k: Optional[int] = None + """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" + top_p: Optional[float] = None + """If set to float < 1, only the smallest set of most probable tokens with probabilities + that add up to top_p or higher are kept for generation. + """ + typical_p: Optional[float] = None + """Local typicality measures how similar the conditional probability of predicting a target + token next is to the expected conditional probability of predicting a random token next, + given the partial text already generated. If set to float < 1, the smallest set of the + most locally typical tokens with probabilities that add up to typical_p or higher are + kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. 
+ """ + use_cache: Optional[bool] = None + """Whether the model should use the past last key/values attentions to speed up decoding""" + + +@dataclass +class AutomaticSpeechRecognitionParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Automatic Speech Recognition + """ + + generate: Optional[AutomaticSpeechRecognitionGenerationParameters] = None + """Parametrization of the text generation process""" + return_timestamps: Optional[bool] = None + """Whether to output corresponding timestamps with the generated text""" + + +@dataclass +class AutomaticSpeechRecognitionInput(BaseInferenceType): + """Inputs for Automatic Speech Recognition inference""" + + inputs: Any + """The input audio data""" + parameters: Optional[AutomaticSpeechRecognitionParameters] = None + """Additional inference parameters""" + + +@dataclass +class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType): + text: str + """A chunk of text identified by the model""" + timestamps: List[float] + """The start and end timestamps corresponding with the text""" + + +@dataclass +class AutomaticSpeechRecognitionOutput(BaseInferenceType): + """Outputs of inference for the Automatic Speech Recognition task""" + + text: str + """The recognized text.""" + chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None + """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by + the model. + """ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py new file mode 100644 index 0000000000000000000000000000000000000000..8783484d2d466a74c1c634508c39a7e9cb2851a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py @@ -0,0 +1,149 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains a base class for all inference types.""" + +import inspect +import json +import warnings +from dataclasses import asdict, dataclass +from typing import Any, Dict, List, Type, TypeVar, Union, get_args + + +T = TypeVar("T", bound="BaseInferenceType") + + +@dataclass +class BaseInferenceType(dict): + """Base class for all inference types. + + Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future. + + Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields + are made optional, and non-expected fields are added as dict attributes). + """ + + @classmethod + def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]: + """Alias to parse server response and return a single instance. + + See `parse_obj` for more details. + """ + output = cls.parse_obj(data) + if not isinstance(output, list): + raise ValueError(f"Invalid input data for {cls}. 
Expected a list, but got {type(output)}.") + return output + + @classmethod + def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T: + """Alias to parse server response and return a single instance. + + See `parse_obj` for more details. + """ + output = cls.parse_obj(data) + if isinstance(output, list): + raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.") + return output + + @classmethod + def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]: + """Parse server response as a dataclass or list of dataclasses. + + To enable future-compatibility, we want to handle cases where the server returns more fields than expected. + In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are + added as dict attributes. + """ + # Parse server response (from bytes) + if isinstance(data, bytes): + data = data.decode() + if isinstance(data, str): + data = json.loads(data) + + # If a list, parse each item individually + if isinstance(data, List): + return [cls.parse_obj(d) for d in data] # type: ignore [misc] + + # At this point, we expect a dict + if not isinstance(data, dict): + raise ValueError(f"Invalid data type: {type(data)}") + + init_values = {} + other_values = {} + for key, value in data.items(): + key = normalize_key(key) + if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init: + if isinstance(value, dict) or isinstance(value, list): + field_type = cls.__dataclass_fields__[key].type + + # if `field_type` is a `BaseInferenceType`, parse it + if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType): + value = field_type.parse_obj(value) + + # otherwise, recursively parse nested dataclasses (if possible) + # `get_args` handles Union and Optional for us + else: + expected_types = get_args(field_type) + for expected_type in expected_types: + if getattr(expected_type, "_name", None) == "List": + expected_type = get_args(expected_type)[ + 0 + ] # assume same type for all items in the list + if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType): + value = expected_type.parse_obj(value) + break + init_values[key] = value + else: + other_values[key] = value + + # Make all missing fields default to None + # => ensure that dataclass initialization will never fail even if the server does not return all fields. + for key in cls.__dataclass_fields__: + if key not in init_values: + init_values[key] = None + + # Initialize dataclass with expected values + item = cls(**init_values) + + # Add remaining fields as dict attributes + item.update(other_values) + return item + + def __post_init__(self): + self.update(asdict(self)) + + def __setitem__(self, __key: Any, __value: Any) -> None: + # Hacky way to keep dataclass values in sync when dict is updated + super().__setitem__(__key, __value) + if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value: + self.__setattr__(__key, __value) + return + + def __setattr__(self, __name: str, __value: Any) -> None: + # Hacky way to keep dict values in sync when dataclass is updated + super().__setattr__(__name, __value) + if self.get(__name) != __value: + self[__name] = __value + return + + def __getitem__(self, __key: Any) -> Any: + warnings.warn( + f"Accessing '{self.__class__.__name__}' values through dict is deprecated and " + "will be removed from version '0.25'. 
Use dataclass attributes instead.", + FutureWarning, + ) + return super().__getitem__(__key) + + +def normalize_key(key: str) -> str: + # e.g "content-type" -> "content_type", "Accept" -> "accept" + return key.replace("-", "_").replace(" ", "_").lower() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py new file mode 100644 index 0000000000000000000000000000000000000000..43e24f814b5ea8c4fea6d0ea00d6fef3883da8d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py @@ -0,0 +1,106 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import List, Literal, Optional, Union + +from .base import BaseInferenceType + + +ChatCompletionMessageRole = Literal["assistant", "system", "user"] + + +@dataclass +class ChatCompletionInputMessage(BaseInferenceType): + content: str + """The content of the message.""" + role: "ChatCompletionMessageRole" + + +@dataclass +class ChatCompletionInput(BaseInferenceType): + """Inputs for ChatCompletion inference""" + + messages: List[ChatCompletionInputMessage] + frequency_penalty: Optional[float] = None + """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. + """ + max_tokens: Optional[int] = None + """The maximum number of tokens that can be generated in the chat completion.""" + seed: Optional[int] = None + """The random sampling seed.""" + stop: Optional[Union[List[str], str]] = None + """Stop generating tokens if a stop token is generated.""" + stream: Optional[bool] = None + """If set, partial message deltas will be sent.""" + temperature: Optional[float] = None + """The value used to modulate the logits distribution.""" + top_p: Optional[float] = None + """If set to < 1, only the smallest set of most probable tokens with probabilities that add + up to `top_p` or higher are kept for generation. 
+ """ + + +ChatCompletionFinishReason = Literal["length", "eos_token", "stop_sequence"] + + +@dataclass +class ChatCompletionOutputChoiceMessage(BaseInferenceType): + content: str + """The content of the chat completion message.""" + role: "ChatCompletionMessageRole" + + +@dataclass +class ChatCompletionOutputChoice(BaseInferenceType): + finish_reason: "ChatCompletionFinishReason" + """The reason why the generation was stopped.""" + index: int + """The index of the choice in the list of choices.""" + message: ChatCompletionOutputChoiceMessage + + +@dataclass +class ChatCompletionOutput(BaseInferenceType): + """Outputs for Chat Completion inference""" + + choices: List[ChatCompletionOutputChoice] + """A list of chat completion choices.""" + created: int + """The Unix timestamp (in seconds) of when the chat completion was created.""" + + +@dataclass +class ChatCompletionStreamOutputDelta(BaseInferenceType): + """A chat completion delta generated by streamed model responses.""" + + content: Optional[str] = None + """The contents of the chunk message.""" + role: Optional[str] = None + """The role of the author of this message.""" + + +@dataclass +class ChatCompletionStreamOutputChoice(BaseInferenceType): + delta: ChatCompletionStreamOutputDelta + """A chat completion delta generated by streamed model responses.""" + index: int + """The index of the choice in the list of choices.""" + finish_reason: Optional["ChatCompletionFinishReason"] = None + """The reason why the generation was stopped.""" + + +@dataclass +class ChatCompletionStreamOutput(BaseInferenceType): + """Chat Completion Stream Output""" + + choices: List[ChatCompletionStreamOutputChoice] + """A list of chat completion choices.""" + created: int + """The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has + the same timestamp. + """ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py new file mode 100644 index 0000000000000000000000000000000000000000..fbaa5feeadff9721ba543cb77121b98c17e3ee8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py @@ -0,0 +1,29 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from dataclasses import dataclass +from typing import Any, Dict, Optional + +from .base import BaseInferenceType + + +@dataclass +class DepthEstimationInput(BaseInferenceType): + """Inputs for Depth Estimation inference""" + + inputs: Any + """The input image data""" + parameters: Optional[Dict[str, Any]] = None + """Additional inference parameters""" + + +@dataclass +class DepthEstimationOutput(BaseInferenceType): + """Outputs of inference for the Depth Estimation task""" + + depth: Any + """The predicted depth as an image""" + predicted_depth: Any + """The predicted depth as a tensor""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..df563e671a68926df1d96898879ae775f0d20a6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py @@ -0,0 +1,19 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from .base import BaseInferenceType + + +@dataclass +class FeatureExtractionInput(BaseInferenceType): + """Inputs for Text Embedding inference""" + + inputs: str + """The text to get the embeddings of""" + parameters: Optional[Dict[str, Any]] = None + """Additional inference parameters""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..fd52db005a0be62e7f063c0a16569a1fc2b273da --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py @@ -0,0 +1,43 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
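As `base.py` above implements it, parsing is deliberately permissive: missing fields default to None and unexpected fields are kept as plain dict entries. A small sketch with a made-up payload:

from huggingface_hub.inference._generated.types import FeatureExtractionInput

raw = {"inputs": "Hello world", "some_new_server_field": 42}  # made-up payload with an unknown key
inp = FeatureExtractionInput.parse_obj_as_instance(raw)
print(inp.inputs)      # "Hello world"
print(inp.parameters)  # None -- missing optional fields are filled with None
print(inp.get("some_new_server_field"))  # 42 -- unknown keys survive as dict items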
+from dataclasses import dataclass +from typing import Any, Literal, Optional + +from .base import BaseInferenceType + + +ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] + + +@dataclass +class ImageClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Image Classification + """ + + function_to_apply: Optional["ClassificationOutputTransform"] = None + top_k: Optional[int] = None + """When specified, limits the output to the top K most probable classes.""" + + +@dataclass +class ImageClassificationInput(BaseInferenceType): + """Inputs for Image Classification inference""" + + inputs: Any + """The input image data""" + parameters: Optional[ImageClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class ImageClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Image Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..3810fc594af5cf0712cb0cb0db077383220b175a --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py @@ -0,0 +1,77 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Optional + +from .base import BaseInferenceType + + +@dataclass +class QuestionAnsweringInputData(BaseInferenceType): + """One (context, question) pair to answer""" + + context: str + """The context to be used for answering the question""" + question: str + """The question to be answered""" + + +@dataclass +class QuestionAnsweringParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Question Answering + """ + + align_to_words: Optional[bool] = None + """Attempts to align the answer to real words. Improves quality on space separated + languages. Might hurt on non-space-separated languages (like Japanese or Chinese) + """ + doc_stride: Optional[int] = None + """If the context is too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. + """ + handle_impossible_answer: Optional[bool] = None + """Whether to accept impossible as an answer.""" + max_answer_len: Optional[int] = None + """The maximum length of predicted answers (e.g., only answers with a shorter length are + considered). + """ + max_question_len: Optional[int] = None + """The maximum length of the question after tokenization. It will be truncated if needed.""" + max_seq_len: Optional[int] = None + """The maximum length of the total sentence (context + question) in tokens of each chunk + passed to the model. The context will be split in several chunks (using docStride as + overlap) if needed. + """ + top_k: Optional[int] = None + """The number of answers to return (will be chosen by order of likelihood). 
Note that we + return less than topk answers if there are not enough options available within the + context. + """ + + +@dataclass +class QuestionAnsweringInput(BaseInferenceType): + """Inputs for Question Answering inference""" + + inputs: QuestionAnsweringInputData + """One (context, question) pair to answer""" + parameters: Optional[QuestionAnsweringParameters] = None + """Additional inference parameters""" + + +@dataclass +class QuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Question Answering task""" + + answer: str + """The answer to the question.""" + end: int + """The character position in the input where the answer ends.""" + score: float + """The probability associated to the answer.""" + start: int + """The character position in the input where the answer begins.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/sentence_similarity.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/sentence_similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..944bfccbf76e8c322dbf95a286746c6e1e25a55b --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/sentence_similarity.py @@ -0,0 +1,28 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class SentenceSimilarityInputData(BaseInferenceType): + sentences: List[str] + """A list of strings which will be compared against the source_sentence.""" + source_sentence: str + """The string that you wish to compare the other strings with. This can be a phrase, + sentence, or longer passage, depending on the model being used. + """ + + +@dataclass +class SentenceSimilarityInput(BaseInferenceType): + """Inputs for Sentence similarity inference""" + + inputs: SentenceSimilarityInputData + parameters: Optional[Dict[str, Any]] = None + """Additional inference parameters""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..955494c5ef6b86e12b3927dfd90e44a5db25c2e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py @@ -0,0 +1,45 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
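Inputs can also be constructed directly; since every type is both a dataclass and a dict, a request serializes naturally. A sketch using the question-answering types above (the example strings are made up):

import json

from huggingface_hub.inference._generated.types import (
    QuestionAnsweringInput,
    QuestionAnsweringInputData,
    QuestionAnsweringParameters,
)

qa = QuestionAnsweringInput(
    inputs=QuestionAnsweringInputData(
        context="The Hub hosts models, datasets and Spaces.",
        question="What does the Hub host?",
    ),
    parameters=QuestionAnsweringParameters(top_k=1),
)
print(json.dumps(qa))  # works because BaseInferenceType subclasses dict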
+from dataclasses import dataclass +from typing import Any, Dict, Literal, Optional + +from .base import BaseInferenceType + + +Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] + + +@dataclass +class Text2TextGenerationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text2text Generation + """ + + clean_up_tokenization_spaces: Optional[bool] = None + """Whether to clean up the potential extra spaces in the text output.""" + generate_parameters: Optional[Dict[str, Any]] = None + """Additional parametrization of the text generation algorithm""" + truncation: Optional["Text2TextGenerationTruncationStrategy"] = None + """The truncation strategy to use""" + + +@dataclass +class Text2TextGenerationInput(BaseInferenceType): + """Inputs for Text2text Generation inference""" + + inputs: str + """The input text data""" + parameters: Optional[Text2TextGenerationParameters] = None + """Additional inference parameters""" + + +@dataclass +class Text2TextGenerationOutput(BaseInferenceType): + """Outputs of inference for the Text2text Generation task""" + + generated_text: Any + text2_text_generation_output_generated_text: Optional[str] = None + """The generated text.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..bf61a4eebcf367b4ab15e8970bfac8e1d8f8458d --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py @@ -0,0 +1,43 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
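The `Literal` aliases constrain string-valued parameters at type-check time; a short sketch with the text2text types above (values are illustrative):

from huggingface_hub.inference._generated.types import (
    Text2TextGenerationInput,
    Text2TextGenerationParameters,
)

req = Text2TextGenerationInput(
    inputs="translate English to German: Hello",
    parameters=Text2TextGenerationParameters(truncation="longest_first"),  # must be one of the Literal values
)
print(req.parameters.truncation)  # "longest_first"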
+from dataclasses import dataclass +from typing import Literal, Optional + +from .base import BaseInferenceType + + +ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] + + +@dataclass +class TextClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text Classification + """ + + function_to_apply: Optional["ClassificationOutputTransform"] = None + top_k: Optional[int] = None + """When specified, limits the output to the top K most probable classes.""" + + +@dataclass +class TextClassificationInput(BaseInferenceType): + """Inputs for Text Classification inference""" + + inputs: str + """The text to classify""" + parameters: Optional[TextClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class TextClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Text Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8369de4b26cf8ef38cf8cfbafdc1a8bb12d552 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py @@ -0,0 +1,105 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Literal, Optional, Union + +from .base import BaseInferenceType + + +EarlyStoppingEnum = Literal["never"] + + +@dataclass +class TextToAudioGenerationParameters(BaseInferenceType): + """Parametrization of the text generation process + Ad-hoc parametrization of the text generation process + """ + + do_sample: Optional[bool] = None + """Whether to use sampling instead of greedy decoding when generating new tokens.""" + early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None + """Controls the stopping condition for beam-based methods.""" + epsilon_cutoff: Optional[float] = None + """If set to float strictly between 0 and 1, only tokens with a conditional probability + greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + """ + eta_cutoff: Optional[float] = None + """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + float strictly between 0 and 1, a token is only considered if it is greater than either + eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + for more details. 
+ """ + max_length: Optional[int] = None + """The maximum length (in tokens) of the generated text, including the input.""" + max_new_tokens: Optional[int] = None + """The maximum number of tokens to generate. Takes precedence over maxLength.""" + min_length: Optional[int] = None + """The minimum length (in tokens) of the generated text, including the input.""" + min_new_tokens: Optional[int] = None + """The minimum number of tokens to generate. Takes precedence over maxLength.""" + num_beam_groups: Optional[int] = None + """Number of groups to divide num_beams into in order to ensure diversity among different + groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + """ + num_beams: Optional[int] = None + """Number of beams to use for beam search.""" + penalty_alpha: Optional[float] = None + """The value balances the model confidence and the degeneration penalty in contrastive + search decoding. + """ + temperature: Optional[float] = None + """The value used to modulate the next token probabilities.""" + top_k: Optional[int] = None + """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" + top_p: Optional[float] = None + """If set to float < 1, only the smallest set of most probable tokens with probabilities + that add up to top_p or higher are kept for generation. + """ + typical_p: Optional[float] = None + """Local typicality measures how similar the conditional probability of predicting a target + token next is to the expected conditional probability of predicting a random token next, + given the partial text already generated. If set to float < 1, the smallest set of the + most locally typical tokens with probabilities that add up to typical_p or higher are + kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + """ + use_cache: Optional[bool] = None + """Whether the model should use the past last key/values attentions to speed up decoding""" + + +@dataclass +class TextToAudioParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text To Audio + """ + + generate: Optional[TextToAudioGenerationParameters] = None + """Parametrization of the text generation process""" + + +@dataclass +class TextToAudioInput(BaseInferenceType): + """Inputs for Text To Audio inference""" + + inputs: str + """The input text data""" + parameters: Optional[TextToAudioParameters] = None + """Additional inference parameters""" + + +@dataclass +class TextToAudioOutput(BaseInferenceType): + """Outputs of inference for the Text To Audio task""" + + audio: Any + """The generated audio waveform.""" + sampling_rate: Any + text_to_audio_output_sampling_rate: Optional[float] = None + """The sampling rate of the generated audio waveform.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/translation.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/translation.py new file mode 100644 index 0000000000000000000000000000000000000000..e06ad2b72d35dcf814b110112cd882cb4b4cc616 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/translation.py @@ -0,0 +1,46 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from dataclasses import dataclass +from typing import Any, Dict, Literal, Optional + +from .base import BaseInferenceType + + +TranslationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] + + +@dataclass +class TranslationGenerationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text2text Generation + """ + + clean_up_tokenization_spaces: Optional[bool] = None + """Whether to clean up the potential extra spaces in the text output.""" + generate_parameters: Optional[Dict[str, Any]] = None + """Additional parametrization of the text generation algorithm""" + truncation: Optional["TranslationGenerationTruncationStrategy"] = None + """The truncation strategy to use""" + + +@dataclass +class TranslationInput(BaseInferenceType): + """Inputs for Translation inference + Inputs for Text2text Generation inference + """ + + inputs: str + """The input text data""" + parameters: Optional[TranslationGenerationParameters] = None + """Additional inference parameters""" + + +@dataclass +class TranslationOutput(BaseInferenceType): + """Outputs of inference for the Translation task""" + + translation_text: str + """The translated text.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..0c5a9d55a81fab6fc71e1226e7776a7a68ee688f --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py @@ -0,0 +1,47 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
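Since the Translation types mirror the Text2text Generation spec (the reused docstrings above say as much), building a request is identical apart from the class names. A minimal sketch:

```python
from huggingface_hub.inference._generated.types.translation import (
    TranslationGenerationParameters,
    TranslationInput,
)

request = TranslationInput(
    inputs="Le renard brun saute par-dessus le chien paresseux.",
    parameters=TranslationGenerationParameters(truncation="do_not_truncate"),
)
```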
+from dataclasses import dataclass +from typing import Any, Literal, Optional + +from .base import BaseInferenceType + + +ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] + + +@dataclass +class VideoClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Video Classification + """ + + frame_sampling_rate: Optional[int] = None + """The sampling rate used to select frames from the video.""" + function_to_apply: Optional["ClassificationOutputTransform"] = None + num_frames: Optional[int] = None + """The number of sampled frames to consider for classification.""" + top_k: Optional[int] = None + """When specified, limits the output to the top K most probable classes.""" + + +@dataclass +class VideoClassificationInput(BaseInferenceType): + """Inputs for Video Classification inference""" + + inputs: Any + """The input video data""" + parameters: Optional[VideoClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class VideoClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Video Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..6c55ebf218ca3314993aacd7eaa8c1910b5ab63e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_classification.py @@ -0,0 +1,56 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import List, Optional + +from .base import BaseInferenceType + + +@dataclass +class ZeroShotClassificationInputData(BaseInferenceType): + """The input text data, with candidate labels""" + + candidate_labels: List[str] + """The set of possible class labels to classify the text into.""" + text: str + """The text to classify""" + + +@dataclass +class ZeroShotClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Zero Shot Classification + """ + + hypothesis_template: Optional[str] = None + """The sentence used in conjunction with candidateLabels to attempt the text classification + by replacing the placeholder with the candidate labels. + """ + multi_label: Optional[bool] = None + """Whether multiple candidate labels can be true. If false, the scores are normalized such + that the sum of the label likelihoods for each sequence is 1. If true, the labels are + considered independent and probabilities are normalized for each candidate. 
+ """ + + +@dataclass +class ZeroShotClassificationInput(BaseInferenceType): + """Inputs for Zero Shot Classification inference""" + + inputs: ZeroShotClassificationInputData + """The input text data, with candidate labels""" + parameters: Optional[ZeroShotClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class ZeroShotClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Zero Shot Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..1d635187d7ed2f92eb239dc1e4ee4754394dad4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py @@ -0,0 +1,51 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class ZeroShotImageClassificationInputData(BaseInferenceType): + """The input image data, with candidate labels""" + + candidate_labels: List[str] + """The candidate labels for this image""" + image: Any + """The image data to classify""" + + +@dataclass +class ZeroShotImageClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Zero Shot Image Classification + """ + + hypothesis_template: Optional[str] = None + """The sentence used in conjunction with candidateLabels to attempt the text classification + by replacing the placeholder with the candidate labels. + """ + + +@dataclass +class ZeroShotImageClassificationInput(BaseInferenceType): + """Inputs for Zero Shot Image Classification inference""" + + inputs: ZeroShotImageClassificationInputData + """The input image data, with candidate labels""" + parameters: Optional[ZeroShotImageClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class ZeroShotImageClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Zero Shot Image Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..42a21568c9c652eb307cf2bd44ee9aa06ab4df7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py @@ -0,0 +1,55 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class ZeroShotObjectDetectionInputData(BaseInferenceType): + """The input image data, with candidate labels""" + + candidate_labels: List[str] + """The candidate labels for this image""" + image: Any + """The image data to generate bounding boxes from""" + + +@dataclass +class ZeroShotObjectDetectionInput(BaseInferenceType): + """Inputs for Zero Shot Object Detection inference""" + + inputs: ZeroShotObjectDetectionInputData + """The input image data, with candidate labels""" + parameters: Optional[Dict[str, Any]] = None + """Additional inference parameters""" + + +@dataclass +class ZeroShotObjectDetectionBoundingBox(BaseInferenceType): + """The predicted bounding box. Coordinates are relative to the top left corner of the input + image. + """ + + xmax: int + xmin: int + ymax: int + ymin: int + + +@dataclass +class ZeroShotObjectDetectionOutputElement(BaseInferenceType): + """Outputs of inference for the Zero Shot Object Detection task""" + + box: ZeroShotObjectDetectionBoundingBox + """The predicted bounding box. Coordinates are relative to the top left corner of the input + image. + """ + label: str + """A candidate label""" + score: float + """The associated score / probability""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b0af032ae6a68f03676ad7fdb8e483248d9853f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains a utility to iterate by chunks over an iterator.""" + +import itertools +from typing import Iterable, TypeVar + + +T = TypeVar("T") + + +def chunk_iterable(iterable: Iterable[T], chunk_size: int) -> Iterable[Iterable[T]]: + """Iterates over an iterator chunk by chunk. + + Taken from https://stackoverflow.com/a/8998040. + See also https://github.com/huggingface/huggingface_hub/pull/920#discussion_r938793088. + + Args: + iterable (`Iterable`): + The iterable on which we want to iterate. + chunk_size (`int`): + Size of the chunks. Must be a strictly positive integer (e.g. >0). + + Example: + + ```python + >>> from huggingface_hub.utils import chunk_iterable + + >>> for items in chunk_iterable(range(17), chunk_size=8): + ... print(items) + # [0, 1, 2, 3, 4, 5, 6, 7] + # [8, 9, 10, 11, 12, 13, 14, 15] + # [16] # smaller last chunk + ``` + + Raises: + [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + If `chunk_size` <= 0. + + + The last chunk can be smaller than `chunk_size`. 
+ + """ + if not isinstance(chunk_size, int) or chunk_size <= 0: + raise ValueError("`chunk_size` must be a strictly positive integer (>0).") + + iterator = iter(iterable) + while True: + try: + next_item = next(iterator) + except StopIteration: + return + yield itertools.chain((next_item,), itertools.islice(iterator, chunk_size - 1)) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..e544884b8793d8d409303cafd34586523fc3fb1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to handle datetimes in Huggingface Hub.""" + +from datetime import datetime, timezone + + +def parse_datetime(date_string: str) -> datetime: + """ + Parses a date_string returned from the server to a datetime object. + + This parser is a weak-parser is the sense that it handles only a single format of + date_string. It is expected that the server format will never change. The + implementation depends only on the standard lib to avoid an external dependency + (python-dateutil). See full discussion about this decision on PR: + https://github.com/huggingface/huggingface_hub/pull/999. + + Example: + ```py + > parse_datetime('2022-08-19T07:19:38.123Z') + datetime.datetime(2022, 8, 19, 7, 19, 38, 123000, tzinfo=timezone.utc) + ``` + + Args: + date_string (`str`): + A string representing a datetime returned by the Hub server. + String is expected to follow '%Y-%m-%dT%H:%M:%S.%fZ' pattern. + + Returns: + A python datetime object. + + Raises: + :class:`ValueError`: + If `date_string` cannot be parsed. + """ + try: + # Datetime ending with a Z means "UTC". We parse the date and then explicitly + # set the timezone to UTC. + # See https://en.wikipedia.org/wiki/ISO_8601#Coordinated_Universal_Time_(UTC) + # Taken from https://stackoverflow.com/a/3168394. + if len(date_string) == 30: + # Means timezoned-timestamp with nanoseconds precision. We need to truncate the last 3 digits. + date_string = date_string[:-4] + "Z" + dt = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ") + return dt.replace(tzinfo=timezone.utc) # Set explicit timezone + except ValueError as e: + raise ValueError( + f"Cannot parse '{date_string}' as a datetime. Date string is expected to" + " follow '%Y-%m-%dT%H:%M:%S.%fZ' pattern." + ) from e diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..21f852736c841be1caa747f8dc6c8657c0e3d8f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py @@ -0,0 +1,372 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Check presence of installed packages at runtime.""" + +import importlib.metadata +import platform +import sys +import warnings +from typing import Any, Dict + +from .. import __version__, constants + + +_PY_VERSION: str = sys.version.split()[0].rstrip("+") + +_package_versions: Dict[str, str] = {} + +_CANDIDATES = { + "aiohttp": {"aiohttp"}, + "fastai": {"fastai"}, + "fastcore": {"fastcore"}, + "gradio": {"gradio"}, + "graphviz": {"graphviz"}, + "hf_transfer": {"hf_transfer"}, + "jinja": {"Jinja2"}, + "keras": {"keras"}, + "minijinja": {"minijinja"}, + "numpy": {"numpy"}, + "pillow": {"Pillow"}, + "pydantic": {"pydantic"}, + "pydot": {"pydot"}, + "safetensors": {"safetensors"}, + "tensorboard": {"tensorboardX"}, + "tensorflow": ( + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "intel-tensorflow", + "intel-tensorflow-avx512", + "tensorflow-rocm", + "tensorflow-macos", + ), + "torch": {"torch"}, +} + +# Check once at runtime +for candidate_name, package_names in _CANDIDATES.items(): + _package_versions[candidate_name] = "N/A" + for name in package_names: + try: + _package_versions[candidate_name] = importlib.metadata.version(name) + break + except importlib.metadata.PackageNotFoundError: + pass + + +def _get_version(package_name: str) -> str: + return _package_versions.get(package_name, "N/A") + + +def is_package_available(package_name: str) -> bool: + return _get_version(package_name) != "N/A" + + +# Python +def get_python_version() -> str: + return _PY_VERSION + + +# Huggingface Hub +def get_hf_hub_version() -> str: + return __version__ + + +# aiohttp
def is_aiohttp_available() -> bool: + return is_package_available("aiohttp") + + +def get_aiohttp_version() -> str: + return _get_version("aiohttp") + + +# FastAI +def is_fastai_available() -> bool: + return is_package_available("fastai") + + +def get_fastai_version() -> str: + return _get_version("fastai") + + +# Fastcore +def is_fastcore_available() -> bool: + return is_package_available("fastcore") + + +def get_fastcore_version() -> str: + return _get_version("fastcore") + + +# Gradio +def is_gradio_available() -> bool: + return is_package_available("gradio") + + +def get_gradio_version() -> str: + return _get_version("gradio") + + +# Graphviz +def is_graphviz_available() -> bool: + return is_package_available("graphviz") + + +def get_graphviz_version() -> str: + return _get_version("graphviz") + + +# hf_transfer +def is_hf_transfer_available() -> bool: + return is_package_available("hf_transfer") + + +def get_hf_transfer_version() -> str: + return _get_version("hf_transfer") + + +# keras +def is_keras_available() -> bool: + return is_package_available("keras") + + +def get_keras_version() -> str: + return _get_version("keras") + + +# Minijinja +def is_minijinja_available() -> bool: + return is_package_available("minijinja") + + +def get_minijinja_version() -> str: + return _get_version("minijinja") + + +# Numpy +def is_numpy_available() ->
bool: + return is_package_available("numpy") + + +def get_numpy_version() -> str: + return _get_version("numpy") + + +# Jinja +def is_jinja_available() -> bool: + return is_package_available("jinja") + + +def get_jinja_version() -> str: + return _get_version("jinja") + + +# Pillow +def is_pillow_available() -> bool: + return is_package_available("pillow") + + +def get_pillow_version() -> str: + return _get_version("pillow") + + +# Pydantic +def is_pydantic_available() -> bool: + if not is_package_available("pydantic"): + return False + # For Pydantic, we add an extra check to test whether it is correctly installed or not. If both pydantic 2.x and + # typing_extensions<=4.5.0 are installed, then pydantic will fail at import time. This should not happen when + # it is installed with `pip install huggingface_hub[inference]` but it can happen when it is installed manually + # by the user in an environment that we don't control. + # + # Usually we won't need to do this kind of check on optional dependencies. However, pydantic is a special case + # as it is automatically imported when doing `from huggingface_hub import ...` even if the user doesn't use it. + # + # See https://github.com/huggingface/huggingface_hub/pull/1829 for more details. + try: + from pydantic import validator # noqa: F401 + except ImportError as e: + # Example: "ImportError: cannot import name 'TypeAliasType' from 'typing_extensions'" + warnings.warn( + "Pydantic is installed but cannot be imported. Please check your installation. `huggingface_hub` will " + f"default to not using Pydantic. Error message: '{e}'" + ) + return False + return True + + +def get_pydantic_version() -> str: + return _get_version("pydantic") + + +# Pydot +def is_pydot_available() -> bool: + return is_package_available("pydot") + + +def get_pydot_version() -> str: + return _get_version("pydot") + + +# Tensorboard +def is_tensorboard_available() -> bool: + return is_package_available("tensorboard") + + +def get_tensorboard_version() -> str: + return _get_version("tensorboard") + + +# Tensorflow +def is_tf_available() -> bool: + return is_package_available("tensorflow") + + +def get_tf_version() -> str: + return _get_version("tensorflow") + + +# Torch +def is_torch_available() -> bool: + return is_package_available("torch") + + +def get_torch_version() -> str: + return _get_version("torch") + + +# Safetensors +def is_safetensors_available() -> bool: + return is_package_available("safetensors") + + +# Shell-related helpers +try: + # Set to `True` if script is running in a Google Colab notebook. + # If running in Google Colab, git credential store is set globally which makes the + # warning disappear. See https://github.com/huggingface/huggingface_hub/issues/1043 + # + # Taken from https://stackoverflow.com/a/63519730. + _is_google_colab = "google.colab" in str(get_ipython()) # type: ignore # noqa: F821 +except NameError: + _is_google_colab = False + + +def is_notebook() -> bool: + """Return `True` if code is executed in a notebook (Jupyter, Colab, QTconsole). + + Taken from https://stackoverflow.com/a/39662359. + Adapted to make it work with Google Colab as well. + """ + try: + shell_class = get_ipython().__class__ # type: ignore # noqa: F821 + for parent_class in shell_class.__mro__: # e.g.
"is subclass of" + if parent_class.__name__ == "ZMQInteractiveShell": + return True # Jupyter notebook, Google colab or qtconsole + return False + except NameError: + return False # Probably standard Python interpreter + + +def is_google_colab() -> bool: + """Return `True` if code is executed in a Google colab. + + Taken from https://stackoverflow.com/a/63519730. + """ + return _is_google_colab + + +def dump_environment_info() -> Dict[str, Any]: + """Dump information about the machine to help debugging issues. + + Similar helper exist in: + - `datasets` (https://github.com/huggingface/datasets/blob/main/src/datasets/commands/env.py) + - `diffusers` (https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/env.py) + - `transformers` (https://github.com/huggingface/transformers/blob/main/src/transformers/commands/env.py) + """ + from huggingface_hub import get_token, whoami + from huggingface_hub.utils import list_credential_helpers + + token = get_token() + + # Generic machine info + info: Dict[str, Any] = { + "huggingface_hub version": get_hf_hub_version(), + "Platform": platform.platform(), + "Python version": get_python_version(), + } + + # Interpreter info + try: + shell_class = get_ipython().__class__ # type: ignore # noqa: F821 + info["Running in iPython ?"] = "Yes" + info["iPython shell"] = shell_class.__name__ + except NameError: + info["Running in iPython ?"] = "No" + info["Running in notebook ?"] = "Yes" if is_notebook() else "No" + info["Running in Google Colab ?"] = "Yes" if is_google_colab() else "No" + + # Login info + info["Token path ?"] = constants.HF_TOKEN_PATH + info["Has saved token ?"] = token is not None + if token is not None: + try: + info["Who am I ?"] = whoami()["name"] + except Exception: + pass + + try: + info["Configured git credential helpers"] = ", ".join(list_credential_helpers()) + except Exception: + pass + + # Installed dependencies + info["FastAI"] = get_fastai_version() + info["Tensorflow"] = get_tf_version() + info["Torch"] = get_torch_version() + info["Jinja2"] = get_jinja_version() + info["Graphviz"] = get_graphviz_version() + info["keras"] = get_keras_version() + info["Pydot"] = get_pydot_version() + info["Pillow"] = get_pillow_version() + info["hf_transfer"] = get_hf_transfer_version() + info["gradio"] = get_gradio_version() + info["tensorboard"] = get_tensorboard_version() + info["numpy"] = get_numpy_version() + info["pydantic"] = get_pydantic_version() + info["aiohttp"] = get_aiohttp_version() + + # Environment variables + info["ENDPOINT"] = constants.ENDPOINT + info["HF_HUB_CACHE"] = constants.HF_HUB_CACHE + info["HF_ASSETS_CACHE"] = constants.HF_ASSETS_CACHE + info["HF_TOKEN_PATH"] = constants.HF_TOKEN_PATH + info["HF_HUB_OFFLINE"] = constants.HF_HUB_OFFLINE + info["HF_HUB_DISABLE_TELEMETRY"] = constants.HF_HUB_DISABLE_TELEMETRY + info["HF_HUB_DISABLE_PROGRESS_BARS"] = constants.HF_HUB_DISABLE_PROGRESS_BARS + info["HF_HUB_DISABLE_SYMLINKS_WARNING"] = constants.HF_HUB_DISABLE_SYMLINKS_WARNING + info["HF_HUB_DISABLE_EXPERIMENTAL_WARNING"] = constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING + info["HF_HUB_DISABLE_IMPLICIT_TOKEN"] = constants.HF_HUB_DISABLE_IMPLICIT_TOKEN + info["HF_HUB_ENABLE_HF_TRANSFER"] = constants.HF_HUB_ENABLE_HF_TRANSFER + info["HF_HUB_ETAG_TIMEOUT"] = constants.HF_HUB_ETAG_TIMEOUT + info["HF_HUB_DOWNLOAD_TIMEOUT"] = constants.HF_HUB_DOWNLOAD_TIMEOUT + + print("\nCopy-and-paste the text below in your GitHub issue.\n") + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n") + return info diff 
--git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py new file mode 100644 index 0000000000000000000000000000000000000000..5de988e2795188324f69232d1beb68191591715d --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_telemetry.py @@ -0,0 +1,118 @@ +from queue import Queue +from threading import Lock, Thread +from typing import Dict, Optional, Union +from urllib.parse import quote + +from .. import constants, logging +from . import build_hf_headers, get_session, hf_raise_for_status + + +logger = logging.get_logger(__name__) + +# Telemetry is sent by a separate thread to avoid blocking the main thread. +# A daemon thread is started once and consumes tasks from the _TELEMETRY_QUEUE. +# If the thread stops for some reason (which shouldn't happen), we start a new one. +_TELEMETRY_THREAD: Optional[Thread] = None +_TELEMETRY_THREAD_LOCK = Lock() # Lock to avoid starting multiple threads in parallel +_TELEMETRY_QUEUE: Queue = Queue() + + +def send_telemetry( + topic: str, + *, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, +) -> None: + """ + Sends telemetry that helps track usage of different HF libraries. + + This usage data helps us debug issues and prioritize new features. However, we understand that not everyone wants + to share additional information, and we respect your privacy. You can disable telemetry collection by setting + `HF_HUB_DISABLE_TELEMETRY=1` as an environment variable. Telemetry is also disabled in offline mode (i.e. when setting + `HF_HUB_OFFLINE=1`). + + Telemetry collection is run in a separate thread to minimize impact for the user. + + Args: + topic (`str`): + Name of the topic that is monitored. The topic is directly used to build the URL. If you want to monitor + subtopics, just use "/" separation. Examples: "gradio", "transformers/examples",... + library_name (`str`, *optional*): + The name of the library that is making the HTTP request. Will be added to the user-agent header. + library_version (`str`, *optional*): + The version of the library that is making the HTTP request. Will be added to the user-agent header. + user_agent (`str`, `dict`, *optional*): + The user agent info in the form of a dictionary or a single string. It will be completed with information about the installed packages. + + Example: + ```py + >>> from huggingface_hub.utils import send_telemetry + + # Send telemetry without library information + >>> send_telemetry("ping") + + # Send telemetry to subtopic with library information + >>> send_telemetry("gradio/local_link", library_name="gradio", library_version="3.22.1") + + # Send telemetry with additional data + >>> send_telemetry( + ... topic="examples", + ... library_name="transformers", + ... library_version="4.26.0", + ... user_agent={"pipeline": "text_classification", "framework": "flax"}, + ... ) + ``` + """ + if constants.HF_HUB_OFFLINE or constants.HF_HUB_DISABLE_TELEMETRY: + return + + _start_telemetry_thread() # starts the thread only if it doesn't exist yet + _TELEMETRY_QUEUE.put( + {"topic": topic, "library_name": library_name, "library_version": library_version, "user_agent": user_agent} + ) + + +def _start_telemetry_thread(): + """Start a daemon thread to consume tasks from the telemetry queue. + + If the thread is interrupted, start a new one.
+ """ + with _TELEMETRY_THREAD_LOCK: # avoid to start multiple threads if called concurrently + global _TELEMETRY_THREAD + if _TELEMETRY_THREAD is None or not _TELEMETRY_THREAD.is_alive(): + _TELEMETRY_THREAD = Thread(target=_telemetry_worker, daemon=True) + _TELEMETRY_THREAD.start() + + +def _telemetry_worker(): + """Wait for a task and consume it.""" + while True: + kwargs = _TELEMETRY_QUEUE.get() + _send_telemetry_in_thread(**kwargs) + _TELEMETRY_QUEUE.task_done() + + +def _send_telemetry_in_thread( + topic: str, + *, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, +) -> None: + """Contains the actual data sending data to the Hub.""" + path = "/".join(quote(part) for part in topic.split("/") if len(part) > 0) + try: + r = get_session().head( + f"{constants.ENDPOINT}/api/telemetry/{path}", + headers=build_hf_headers( + token=False, # no need to send a token for telemetry + library_name=library_name, + library_version=library_version, + user_agent=user_agent, + ), + ) + hf_raise_for_status(r) + except Exception as e: + # We don't want to error in case of connection errors of any kind. + logger.debug(f"Error while sending telemetry: {e}") diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..f58d38919791127b871b5d1accb9b38b064e1075 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to validate argument values in `huggingface_hub`.""" + +import inspect +import re +import warnings +from functools import wraps +from itertools import chain +from typing import Any, Dict + +from ._typing import CallableT + + +REPO_ID_REGEX = re.compile( + r""" + ^ + (\b[\w\-.]+\b/)? # optional namespace (username or organization) + \b # starts with a word boundary + [\w\-.]{1,96} # repo_name: alphanumeric + . _ - + \b # ends with a word boundary + $ + """, + flags=re.VERBOSE, +) + + +class HFValidationError(ValueError): + """Generic exception thrown by `huggingface_hub` validators. + + Inherits from [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError). + """ + + +def validate_hf_hub_args(fn: CallableT) -> CallableT: + """Validate values received as argument for any public method of `huggingface_hub`. + + The goal of this decorator is to harmonize validation of arguments reused + everywhere. By default, all defined validators are tested. + + Validators: + - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"` + or `"namespace/repo_name"`. Namespace is a username or an organization. 
+ - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of + `use_auth_token` (only if `use_auth_token` is not expected by the decorated + function - in practice, always the case in `huggingface_hub`). + + Example: + ```py + >>> from huggingface_hub.utils import validate_hf_hub_args + + >>> @validate_hf_hub_args + ... def my_cool_method(repo_id: str): + ... print(repo_id) + + >>> my_cool_method(repo_id="valid_repo_id") + valid_repo_id + + >>> my_cool_method("other..repo..id") + huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. + + >>> my_cool_method(repo_id="other..repo..id") + huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. + + >>> @validate_hf_hub_args + ... def my_cool_auth_method(token: str): + ... print(token) + + >>> my_cool_auth_method(token="a token") + "a token" + + >>> my_cool_auth_method(use_auth_token="a use_auth_token") + "a use_auth_token" + + >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token") + UserWarning: Both `token` and `use_auth_token` are passed (...) + "a token" + ``` + + Raises: + [`~utils.HFValidationError`]: + If an input is not valid. + """ + # TODO: add an argument to opt-out validation for specific argument? + signature = inspect.signature(fn) + + # Should the validator switch `use_auth_token` values to `token`? In practice, always + # True in `huggingface_hub`. Might not be the case in a downstream library. + check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters + + @wraps(fn) + def _inner_fn(*args, **kwargs): + has_token = False + for arg_name, arg_value in chain( + zip(signature.parameters, args), # Args values + kwargs.items(), # Kwargs values + ): + if arg_name in ["repo_id", "from_id", "to_id"]: + validate_repo_id(arg_value) + + elif arg_name == "token" and arg_value is not None: + has_token = True + + if check_use_auth_token: + kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs) + + return fn(*args, **kwargs) + + return _inner_fn # type: ignore + + +def validate_repo_id(repo_id: str) -> None: + """Validate `repo_id` is valid. + + This is not meant to replace the proper validation made on the Hub but rather to + avoid local inconsistencies whenever possible (example: passing `repo_type` in the + `repo_id` is forbidden). + + Rules: + - Between 1 and 96 characters. + - Either "repo_name" or "namespace/repo_name" + - [a-zA-Z0-9] or "-", "_", "." + - "--" and ".." are forbidden + + Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"` + + Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"` + + Example: + ```py + >>> from huggingface_hub.utils import validate_repo_id + >>> validate_repo_id(repo_id="valid_repo_id") + >>> validate_repo_id(repo_id="other..repo..id") + huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. + ``` + + Discussed in https://github.com/huggingface/huggingface_hub/issues/1008. 
+ In moon-landing (internal repository): + - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27 + - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138 + """ + if not isinstance(repo_id, str): + # Typically, a Path is not a repo_id + raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.") + + if repo_id.count("/") > 1: + raise HFValidationError( + "Repo id must be in the form 'repo_name' or 'namespace/repo_name':" + f" '{repo_id}'. Use `repo_type` argument if needed." + ) + + if not REPO_ID_REGEX.match(repo_id): + raise HFValidationError( + "Repo id must use alphanumeric chars or '-', '_', '.'; '--' and '..' are" + " forbidden, '-' and '.' cannot start or end the name, max length is 96:" + f" '{repo_id}'." + ) + + if "--" in repo_id or ".." in repo_id: + raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.") + + if repo_id.endswith(".git"): + raise HFValidationError(f"Repo id cannot end with '.git': '{repo_id}'.") + + +def smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]: + """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase. + + The long-term goal is to remove any mention of `use_auth_token` in the codebase in + favor of a unique and less verbose `token` argument. This will be done in a few steps: + + 0. Step 0: methods that require read-access to the Hub use the `use_auth_token` + argument (`str`, `bool` or `None`). Methods requiring write-access have a `token` + argument (`str`, `None`). This implicit rule exists to be able to not send the + token when not necessary (`use_auth_token=False`) even if logged in. + + 1. Step 1: we want to harmonize everything and use `token` everywhere (supporting + `token=False` for read-only methods). In order not to break existing code, if + `use_auth_token` is passed to a function, the `use_auth_token` value is passed + as `token` instead, without any warning. + a. Corner case: if both `use_auth_token` and `token` values are passed, a warning + is thrown and the `use_auth_token` value is ignored. + + 2. Step 2: Once it is released, we should push downstream libraries to switch from + `use_auth_token` to `token` as much as possible, but without throwing a warning + (e.g. manually create issues on the corresponding repos). + + 3. Step 3: After a transitional period (e.g. 6 months, until April 2023), we update + `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few + users will be impacted as it would have already been fixed. + In addition, unit tests in `huggingface_hub` must be adapted to expect warnings + to be thrown (but still use `use_auth_token` as before). + + 4. Step 4: After a normal deprecation cycle (3 releases?), remove this validator. + `use_auth_token` will definitely not be supported. + In addition, we update unit tests in `huggingface_hub` to use `token` everywhere. + + This has been discussed in: + - https://github.com/huggingface/huggingface_hub/issues/1094. + - https://github.com/huggingface/huggingface_hub/pull/928 + - (related) https://github.com/huggingface/huggingface_hub/pull/1064 + """ + new_kwargs = kwargs.copy() # do not mutate input! + + use_auth_token = new_kwargs.pop("use_auth_token", None) # remove from kwargs + if use_auth_token is not None: + if has_token: + warnings.warn( + "Both `token` and `use_auth_token` are passed to" + f" `{fn_name}` with non-None values.
`token` is now the" + " preferred argument to pass a User Access Token." + " `use_auth_token` value will be ignored." + ) + else: + # `token` argument is not passed and a non-None value is passed in + # `use_auth_token` => use `use_auth_token` value as `token` kwarg. + new_kwargs["token"] = use_auth_token + + return new_kwargs diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..ac52ce85a0f45932aeefc702eb44828ad2e17871 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py @@ -0,0 +1,250 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Helpful utility functions and classes in relation to exploring API endpoints +with the aim of a user-friendly interface. +""" + +import math +import re +import warnings +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Union + +from ..repocard_data import ModelCardData + + +if TYPE_CHECKING: + from ..hf_api import ModelInfo + + +def _is_emission_within_treshold( + model_info: "ModelInfo", minimum_threshold: Optional[float], maximum_threshold: Optional[float] +) -> bool: + """Checks if a model's emission is within a given threshold. + + Args: + model_info (`ModelInfo`): + A model info object containing the model's emission information. + minimum_threshold (`float`, *optional*): + A minimum carbon threshold to filter by, such as 1. + maximum_threshold (`float`, *optional*): + A maximum carbon threshold to filter by, such as 10. + + Returns: + `bool`: Whether the model's emission is within the given threshold. + """ + if minimum_threshold is None and maximum_threshold is None: + raise ValueError("`minimum_threshold` and `maximum_threshold` cannot both be `None`") + if minimum_threshold is None: + minimum_threshold = -1 + if maximum_threshold is None: + maximum_threshold = math.inf + + card_data = getattr(model_info, "card_data", None) + if card_data is None or not isinstance(card_data, (dict, ModelCardData)): + return False + + # Get CO2 emission metadata + emission = card_data.get("co2_eq_emissions", None) + if isinstance(emission, dict): + emission = emission["emissions"] + if not emission: + return False + + # Filter out if value is missing or out of range + matched = re.search(r"\d+\.\d+|\d+", str(emission)) + if matched is None: + return False + + emission_value = float(matched.group(0)) + return minimum_threshold <= emission_value <= maximum_threshold + + +@dataclass +class DatasetFilter: + """ + A class that converts human-readable dataset search parameters into ones + compatible with the REST API. For all parameters, capitalization does not + matter. + + + + The `DatasetFilter` class is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to [`list_datasets`].
+ + + + Args: + author (`str`, *optional*): + A string that can be used to identify datasets on + the Hub by the original uploader (author or organization), such as + `facebook` or `huggingface`. + benchmark (`str` or `List`, *optional*): + A string or list of strings that can be used to identify datasets on + the Hub by their official benchmark. + dataset_name (`str`, *optional*): + A string that can be used to identify datasets on + the Hub by its name, such as `SQAC` or `wikineural`. + language_creators (`str` or `List`, *optional*): + A string or list of strings that can be used to identify datasets on + the Hub with how the data was curated, such as `crowdsourced` or + `machine_generated`. + language (`str` or `List`, *optional*): + A string or list of strings representing a two-character language to + filter datasets by on the Hub. + multilinguality (`str` or `List`, *optional*): + A string or list of strings representing a filter for datasets that + contain multiple languages. + size_categories (`str` or `List`, *optional*): + A string or list of strings that can be used to identify datasets on + the Hub by the size of the dataset such as `100K<n<1M` or + `1M<n<10M`. + task_categories (`str` or `List`, *optional*): + A string or list of strings that can be used to identify datasets on + the Hub by the designed task, such as `audio_classification` or + `named_entity_recognition`. + task_ids (`str` or `List`, *optional*): + A string or list of strings that can be used to identify datasets on + the Hub by the specific task, such as `speech_emotion_recognition` or + `paraphrase`. + + Examples: + + ```python + >>> from huggingface_hub import DatasetFilter + + >>> # Using author + >>> new_filter = DatasetFilter(author="facebook") + + >>> # Using benchmark + >>> new_filter = DatasetFilter(benchmark="raft") + + >>> # Using dataset_name + >>> new_filter = DatasetFilter(dataset_name="wikineural") + + >>> # Using language_creators + >>> new_filter = DatasetFilter(language_creators="crowdsourced") + + >>> # Using language + >>> new_filter = DatasetFilter(language="en") + + >>> # Using multilinguality + >>> new_filter = DatasetFilter(multilinguality="multilingual") + + >>> # Using size_categories + >>> new_filter = DatasetFilter(size_categories="100K<n<1M") + + >>> # Using task_categories + >>> new_filter = DatasetFilter(task_categories="audio_classification") + + >>> # Using task_ids + >>> new_filter = DatasetFilter(task_ids="paraphrase") + ``` + """ + + author: Optional[str] = None + benchmark: Optional[Union[str, List[str]]] = None + dataset_name: Optional[str] = None + language_creators: Optional[Union[str, List[str]]] = None + language: Optional[Union[str, List[str]]] = None + multilinguality: Optional[Union[str, List[str]]] = None + size_categories: Optional[Union[str, List[str]]] = None + task_categories: Optional[Union[str, List[str]]] = None + task_ids: Optional[Union[str, List[str]]] = None + + def __post_init__(self): + warnings.warn( + "'DatasetFilter' is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to the `list_datasets` method.", + category=FutureWarning, + ) + + +@dataclass +class ModelFilter: + """ + A class that converts human-readable model search parameters into ones + compatible with the REST API. For all parameters, capitalization does not + matter. + + + + The `ModelFilter` class is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to [`list_models`]. + + + + Args: + author (`str`, *optional*): + A string that can be used to identify models on the Hub by the + original uploader (author or organization), such as `facebook` or + `huggingface`. + library (`str` or `List`, *optional*): + A string or list of strings of foundational libraries models were + originally trained from, such as pytorch, tensorflow, or allennlp.
+ language (`str` or `List`, *optional*): + A string or list of strings of languages, both by name and language + code, such as "en" or "English" + model_name (`str`, *optional*): + A string that contains complete or partial names for models on the + Hub, such as "bert" or "bert-base-cased" + task (`str` or `List`, *optional*): + A string or list of strings of tasks models were designed for, such + as: "fill-mask" or "automatic-speech-recognition" + tags (`str` or `List`, *optional*): + A string tag or a list of tags to filter models on the Hub by, such + as `text-generation` or `spacy`. + trained_dataset (`str` or `List`, *optional*): + A string tag or a list of string tags of the trained dataset for a + model on the Hub. + + Examples: + + ```python + >>> from huggingface_hub import ModelFilter + + >>> # For the author + >>> new_filter = ModelFilter(author="facebook") + + >>> # For the library + >>> new_filter = ModelFilter(library="pytorch") + + >>> # For the language + >>> new_filter = ModelFilter(language="french") + + >>> # For the model_name + >>> new_filter = ModelFilter(model_name="bert") + + >>> # For the task + >>> new_filter = ModelFilter(task="text-classification") + + >>> from huggingface_hub import HfApi + + >>> api = HfApi() + + >>> # For the tags (e.g. model tags such as benchmarks) + >>> new_filter = ModelFilter(tags="benchmark:raft") + + >>> # Related to the dataset + >>> new_filter = ModelFilter(trained_dataset="common_voice") + ``` + """ + + author: Optional[str] = None + library: Optional[Union[str, List[str]]] = None + language: Optional[Union[str, List[str]]] = None + model_name: Optional[str] = None + task: Optional[Union[str, List[str]]] = None + trained_dataset: Optional[Union[str, List[str]]] = None + tags: Optional[Union[str, List[str]]] = None + + def __post_init__(self): + warnings.warn( + "'ModelFilter' is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to the `list_models` method.", + FutureWarning, + )
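Since both filter classes above are deprecated in favor of keyword arguments, the forward-compatible equivalent of a `ModelFilter` query looks like the sketch below. The argument names follow the deprecation notices above; `limit` is assumed to be supported by `list_models` and is included only to keep the example small:

```python
from huggingface_hub import HfApi

api = HfApi()

# Formerly: api.list_models(filter=ModelFilter(author="facebook", task="text-classification"))
for model in api.list_models(author="facebook", task="text-classification", limit=5):
    print(model.id)
```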