diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8bb1d25032841ecd9342b337ee5e627c2c9a4f6a
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e2270217a6beedf39e3ceaf34c23c471933c3cbfd459c2a6a65c9d489ab829e
+size 41830138
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_146_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_146_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56428e5404215ea011e37e5e1629429308fdc64e
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_146_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de11eef1689266966a61852ddd9640b55f8aeec0a7d78c5f61eb5c0b6c234dba
+size 41830212
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_231_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_231_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e339d8a89d83d3c4f04c4b7c83e9ddc166e95e1
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_231_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5371e49b761b2e1ade3343ff95bdda562973d333951ba9ced82c2317903e63b2
+size 41830148
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_244_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_244_mp_rank_03_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c777dbfa4c180bf0599e81fe8778dc44971f9fdd
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_244_mp_rank_03_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09970cc0b8f7909e6b1f4b833b8363b2a8176a2aa1c8dadbf799e0d7759fd307
+size 41830340
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_80_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_80_mp_rank_03_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01cf2bc5320189d4568440fe4e2208823dfb6c42
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_80_mp_rank_03_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f30d12eb1bb108cd8b5fb9939f1fe4c9cef5e889f27b506d012b61ff5329b2b1
+size 41830330
diff --git a/venv/lib/python3.10/site-packages/examples/__init__.py b/venv/lib/python3.10/site-packages/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/examples/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/examples/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e806e935ef60d08a2058be14f3d294901bac2a6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/examples/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/examples/__pycache__/basic_example.cpython-310.pyc b/venv/lib/python3.10/site-packages/examples/__pycache__/basic_example.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..511ce50aa4a3d1242c14fe738f623500d4666f13
Binary files /dev/null and b/venv/lib/python3.10/site-packages/examples/__pycache__/basic_example.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/examples/basic_example.py b/venv/lib/python3.10/site-packages/examples/basic_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d6402f67c25ac28f5d0132398e3982d030c86f9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/examples/basic_example.py
@@ -0,0 +1,60 @@
+from time import sleep
+import multiprocessing
+
+import logging
+from tqdm_multiprocess.logger import setup_logger_tqdm
+logger = logging.getLogger(__name__)
+
+from tqdm_multiprocess import TqdmMultiProcessPool
+
+def some_other_function(tqdm_func):
+    iterations1 = 100
+    iterations2 = 5
+    iterations3 = 2
+
+    total_iterations = iterations1 * iterations2 * iterations3
+    with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress3:
+        progress3.set_description("outer")
+        for i in range(iterations3):
+            logger.info("outer")
+            total_iterations = iterations1 * iterations2
+            with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress2:
+                progress2.set_description("middle")
+                for j in range(iterations2):
+                    logger.info("middle")
+                    # for k in tqdm_func(range(iterations1), dynamic_ncols=True, desc="inner"):
+                    with tqdm_func(total=iterations1, dynamic_ncols=True) as progress1:
+                        for k in range(iterations1):
+                            # logger.info("inner")  # Spam slows down tqdm too much
+                            progress1.set_description("inner")
+                            sleep(0.01)
+                            progress1.update()
+                    progress2.update()
+            progress3.update()
+
+    logger.warning(f"Warning test message. {multiprocessing.current_process().name}")
+    logger.error(f"Error test message. {multiprocessing.current_process().name}")
+
+
+# Multiprocessed
+def example_multiprocessing_function(some_input, tqdm_func):
+    logger.debug(f"Debug test message - I won't show up in console. {multiprocessing.current_process().name}")
+    logger.info(f"Info test message. {multiprocessing.current_process().name}")
+    some_other_function(tqdm_func)
+    return True
+
+def error_callback():
+    print("Error!")
+
+def example():
+    pool = TqdmMultiProcessPool()
+    process_count = 4
+    task_count = 10
+    initial_tasks = [(example_multiprocessing_function, (i,)) for i in range(task_count)]
+    results = pool.map(process_count, initial_tasks, error_callback)
+    print(results)
+
+if __name__ == '__main__':
+    logfile_path = "tqdm_multiprocessing_example.log"
+    setup_logger_tqdm(logfile_path)  # Logger will write messages using tqdm.write
+    example()
\ No newline at end of file
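Editor's note: the example above shows the calling convention this vendored copy of `tqdm_multiprocess` expects — tasks are `(function, args)` tuples, the pool injects a `tqdm_func` progress-bar factory as the trailing argument of each task function, and `pool.map(process_count, tasks, error_callback)` drives the workers. A stripped-down sketch of the same pattern (the task body and names are illustrative; the library's API has changed across releases, so this mirrors only the API used in the file above):

```py
from tqdm_multiprocess import TqdmMultiProcessPool

def count_to(n, tqdm_func):
    # `tqdm_func` is injected by the pool and behaves like tqdm.tqdm inside the worker.
    with tqdm_func(total=n, dynamic_ncols=True) as bar:
        for _ in range(n):
            bar.update()
    return n

def on_error():
    print("A task raised an exception!")

if __name__ == "__main__":
    pool = TqdmMultiProcessPool()
    tasks = [(count_to, (100,)) for _ in range(4)]
    results = pool.map(2, tasks, on_error)  # 2 worker processes
    print(results)
```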
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f56a153967692035211f5d0414320bd56d4cfe3e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e441f258d2abea53d3a4de712005bf39c0431e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfd72d227abd34ac30c05619e7fa40fd4c7d1db2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e816d96483b548819804b838b8d37c979e3a62c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10a650e6cd2be652d6fce82a8234bff577fc8ca1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_client.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e9f5faa5b3890a1a17da6f981df3f6052ace841
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_client.py
@@ -0,0 +1,2354 @@
+# coding=utf-8
+# Copyright 2023-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Related resources:
+#    https://huggingface.co/tasks
+#    https://huggingface.co/docs/huggingface.js/inference/README
+#    https://github.com/huggingface/huggingface.js/tree/main/packages/inference/src
+#    https://github.com/huggingface/text-generation-inference/tree/main/clients/python
+#    https://github.com/huggingface/text-generation-inference/blob/main/clients/python/text_generation/client.py
+#    https://huggingface.slack.com/archives/C03E4DQ9LAJ/p1680169099087869
+#    https://github.com/huggingface/unity-api#tasks
+#
+# Some TODO:
+# - add all tasks
+#
+# NOTE: the philosophy of this client is "let's make it as easy as possible to use it, even if less optimized". Some
+# examples of how it translates:
+# - Timeout / Server unavailable is handled by the client in a single "timeout" parameter.
+# - Files can be provided as bytes, file paths, or URLs and the client will try to "guess" the type.
+# - Images are parsed as PIL.Image for easier manipulation.
+# - Provides a "recommended model" for each task => suboptimal but user-wise quicker to get a first script running.
+# - Only the main parameters are publicly exposed. Power users can always read the docs for more options.
+import base64
+import logging
+import time
+import warnings
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    List,
+    Literal,
+    Optional,
+    Union,
+    overload,
+)
+
+from requests import HTTPError
+from requests.structures import CaseInsensitiveDict
+
+from huggingface_hub.constants import ALL_INFERENCE_API_FRAMEWORKS, INFERENCE_ENDPOINT, MAIN_INFERENCE_API_FRAMEWORKS
+from huggingface_hub.errors import InferenceTimeoutError
+from huggingface_hub.inference._common import (
+    TASKS_EXPECTING_IMAGES,
+    ContentT,
+    ModelStatus,
+    _b64_encode,
+    _b64_to_image,
+    _bytes_to_dict,
+    _bytes_to_image,
+    _bytes_to_list,
+    _fetch_recommended_models,
+    _import_numpy,
+    _is_chat_completion_server,
+    _is_tgi_server,
+    _open_as_binary,
+    _set_as_non_chat_completion_server,
+    _set_as_non_tgi,
+    _stream_chat_completion_response_from_bytes,
+    _stream_chat_completion_response_from_text_generation,
+    _stream_text_generation_response,
+    raise_text_generation_error,
+)
+from huggingface_hub.inference._generated.types import (
+    AudioClassificationOutputElement,
+    AudioToAudioOutputElement,
+    AutomaticSpeechRecognitionOutput,
+    ChatCompletionOutput,
+    ChatCompletionOutputChoice,
+    ChatCompletionOutputChoiceMessage,
+    ChatCompletionStreamOutput,
+    DocumentQuestionAnsweringOutputElement,
+    FillMaskOutputElement,
+    ImageClassificationOutputElement,
+    ImageSegmentationOutputElement,
+    ImageToTextOutput,
+    ObjectDetectionOutputElement,
+    QuestionAnsweringOutputElement,
+    SummarizationOutput,
+    TableQuestionAnsweringOutputElement,
+    TextClassificationOutputElement,
+    TextGenerationOutput,
+    TextGenerationStreamOutput,
+    TokenClassificationOutputElement,
+    TranslationOutput,
+    VisualQuestionAnsweringOutputElement,
+    ZeroShotClassificationOutputElement,
+    ZeroShotImageClassificationOutputElement,
+)
+from huggingface_hub.inference._templating import render_chat_prompt
+from huggingface_hub.inference._types import (
+    ConversationalOutput,  # soon to be removed
+)
+from huggingface_hub.utils import (
+    BadRequestError,
+    build_hf_headers,
+    get_session,
+    hf_raise_for_status,
+)
+
+
+if TYPE_CHECKING:
+    import numpy as np
+    from PIL import Image
+
+logger = logging.getLogger(__name__)
+
+
+class InferenceClient:
+    """
+    Initialize a new Inference Client.
+
+    [`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used
+    seamlessly with either the (free) Inference API or self-hosted Inference Endpoints.
+
+    Args:
+        model (`str`, `optional`):
+            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `bigcode/starcoder`
+            or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is
+            automatically selected for the task.
+        token (`str` or `bool`, *optional*):
+            Hugging Face token. Will default to the locally saved token if not provided.
+            Pass `token=False` if you don't want to send your token to the server.
+        timeout (`float`, `optional`):
+            The maximum number of seconds to wait for a response from the server. Loading a new model in Inference
+            API can take up to several minutes. Defaults to None, meaning it will loop until the server is available.
+        headers (`Dict[str, str]`, `optional`):
+            Additional headers to send to the server. By default only the authorization and user-agent headers are sent.
+            Values in this dictionary will override the default values.
+        cookies (`Dict[str, str]`, `optional`):
+            Additional cookies to send to the server.
+    """
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        token: Union[str, bool, None] = None,
+        timeout: Optional[float] = None,
+        headers: Optional[Dict[str, str]] = None,
+        cookies: Optional[Dict[str, str]] = None,
+    ) -> None:
+        self.model: Optional[str] = model
+        self.token: Union[str, bool, None] = token
+        self.headers = CaseInsensitiveDict(build_hf_headers(token=token))  # contains 'authorization' + 'user-agent'
+        if headers is not None:
+            self.headers.update(headers)
+        self.cookies = cookies
+        self.timeout = timeout
+
+    def __repr__(self):
+        return f"<InferenceClient(model='{self.model}', timeout={self.timeout})>"
+
+    @overload
+    def post(  # type: ignore[misc]
+        self,
+        *,
+        json: Optional[Union[str, Dict, List]] = None,
+        data: Optional[ContentT] = None,
+        model: Optional[str] = None,
+        task: Optional[str] = None,
+        stream: Literal[False] = ...,
+    ) -> bytes: ...
+
+    @overload
+    def post(  # type: ignore[misc]
+        self,
+        *,
+        json: Optional[Union[str, Dict, List]] = None,
+        data: Optional[ContentT] = None,
+        model: Optional[str] = None,
+        task: Optional[str] = None,
+        stream: Literal[True] = ...,
+    ) -> Iterable[bytes]: ...
+
+    @overload
+    def post(
+        self,
+        *,
+        json: Optional[Union[str, Dict, List]] = None,
+        data: Optional[ContentT] = None,
+        model: Optional[str] = None,
+        task: Optional[str] = None,
+        stream: bool = False,
+    ) -> Union[bytes, Iterable[bytes]]: ...
+
+    def post(
+        self,
+        *,
+        json: Optional[Union[str, Dict, List]] = None,
+        data: Optional[ContentT] = None,
+        model: Optional[str] = None,
+        task: Optional[str] = None,
+        stream: bool = False,
+    ) -> Union[bytes, Iterable[bytes]]:
+        """
+        Make a POST request to the inference server.
+
+        Args:
+            json (`Union[str, Dict, List]`, *optional*):
+                The JSON data to send in the request body, specific to each task. Defaults to None.
+            data (`Union[str, Path, bytes, BinaryIO]`, *optional*):
+                The content to send in the request body, specific to each task.
+                It can be raw bytes, a pointer to an opened file, a local file path,
+                or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed,
+                `data` will take precedence. At least `json` or `data` must be provided. Defaults to None.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. Will override the model defined at the instance level. Defaults to None.
+            task (`str`, *optional*):
+                The task to perform on the inference. All available tasks can be found
+                [here](https://huggingface.co/tasks). Used only to default to a recommended model if `model` is not
+                provided. At least `model` or `task` must be provided. Defaults to None.
+            stream (`bool`, *optional*):
+                Whether to iterate over streaming APIs.
+
+        Returns:
+            bytes: The raw bytes returned by the server.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+        """
+        url = self._resolve_url(model, task)
+
+        if data is not None and json is not None:
+            warnings.warn("Ignoring `json` as `data` is passed as binary.")
+
+        # Set Accept header if relevant
+        headers = self.headers.copy()
+        if task in TASKS_EXPECTING_IMAGES and "Accept" not in headers:
+            headers["Accept"] = "image/png"
+
+        t0 = time.time()
+        timeout = self.timeout
+        while True:
+            with _open_as_binary(data) as data_as_binary:
+                try:
+                    response = get_session().post(
+                        url,
+                        json=json,
+                        data=data_as_binary,
+                        headers=headers,
+                        cookies=self.cookies,
+                        timeout=self.timeout,
+                        stream=stream,
+                    )
+                except TimeoutError as error:
+                    # Convert any `TimeoutError` to an `InferenceTimeoutError`
+                    raise InferenceTimeoutError(f"Inference call timed out: {url}") from error  # type: ignore
+
+            try:
+                hf_raise_for_status(response)
+                return response.iter_lines() if stream else response.content
+            except HTTPError as error:
+                if error.response.status_code == 422 and task is not None:
+                    error.args = (
+                        f"{error.args[0]}\nMake sure '{task}' task is supported by the model.",
+                    ) + error.args[1:]
+                if error.response.status_code == 503:
+                    # If Model is unavailable, either raise a TimeoutError...
+                    if timeout is not None and time.time() - t0 > timeout:
+                        raise InferenceTimeoutError(
+                            f"Model not loaded on the server: {url}. Please retry with a higher timeout (current:"
+                            f" {self.timeout}).",
+                            request=error.request,
+                            response=error.response,
+                        ) from error
+                    # ...or wait 1s and retry
+                    logger.info(f"Waiting for model to be loaded on the server: {error}")
+                    time.sleep(1)
+                    if timeout is not None:
+                        timeout = max(self.timeout - (time.time() - t0), 1)  # type: ignore
+                    continue
+                raise
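Editor's note: `post` is the low-level building block that every task method in this client delegates to, but it ships without a usage example. A minimal sketch (the model ID and the exact response payload are illustrative, not guaranteed):

```py
from huggingface_hub import InferenceClient

client = InferenceClient(timeout=60)

# Non-streaming call: returns the raw response bytes; `task` only helps pick a
# recommended model when `model` is omitted.
raw = client.post(json={"inputs": "The capital of France is"}, model="gpt2", task="text-generation")
print(raw[:60])  # e.g. b'[{"generated_text": ...}]'

# Streaming call: returns an iterator over the response lines instead.
for line in client.post(json={"inputs": "Hello"}, model="gpt2", stream=True):
    print(line)
```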
+
+    def audio_classification(
+        self,
+        audio: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> List[AudioClassificationOutputElement]:
+        """
+        Perform audio classification on the provided audio content.
+
+        Args:
+            audio (Union[str, Path, bytes, BinaryIO]):
+                The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an
+                audio file.
+            model (`str`, *optional*):
+                The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub
+                or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
+                audio classification will be used.
+
+        Returns:
+            `List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.audio_classification("audio.flac")
+        [
+            AudioClassificationOutputElement(score=0.4976358711719513, label='hap'),
+            AudioClassificationOutputElement(score=0.3677836060523987, label='neu'),
+            ...
+        ]
+        ```
+        """
+        response = self.post(data=audio, model=model, task="audio-classification")
+        return AudioClassificationOutputElement.parse_obj_as_list(response)
+
+    def audio_to_audio(
+        self,
+        audio: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> List[AudioToAudioOutputElement]:
+        """
+        Performs multiple tasks related to audio-to-audio depending on the model (e.g. speech enhancement, source separation).
+
+        Args:
+            audio (Union[str, Path, bytes, BinaryIO]):
+                The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an
+                audio file.
+            model (`str`, *optional*):
+                The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub
+                or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
+                audio_to_audio will be used.
+
+        Returns:
+            `List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing the audio label, content-type, and audio content as a blob.
+
+        Raises:
+            `InferenceTimeoutError`:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> audio_output = client.audio_to_audio("audio.flac")
+        >>> for i, item in enumerate(audio_output):
+        ...     with open(f"output_{i}.flac", "wb") as f:
+        ...         f.write(item.blob)
+        ```
+        """
+        response = self.post(data=audio, model=model, task="audio-to-audio")
+        audio_output = AudioToAudioOutputElement.parse_obj_as_list(response)
+        for item in audio_output:
+            item.blob = base64.b64decode(item.blob)
+        return audio_output
+
+    def automatic_speech_recognition(
+        self,
+        audio: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> AutomaticSpeechRecognitionOutput:
+        """
+        Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
+
+        Args:
+            audio (Union[str, Path, bytes, BinaryIO]):
+                The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file.
+            model (`str`, *optional*):
+                The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. If not provided, the default recommended model for ASR will be used.
+
+        Returns:
+            [`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.automatic_speech_recognition("hello_world.flac").text + "hello world" + ``` + """ + response = self.post(data=audio, model=model, task="automatic-speech-recognition") + return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response) + + @overload + def chat_completion( # type: ignore + self, + messages: List[Dict[str, str]], + *, + model: Optional[str] = None, + stream: Literal[False] = False, + max_tokens: int = 20, + seed: Optional[int] = None, + stop: Optional[Union[List[str], str]] = None, + temperature: float = 1.0, + top_p: Optional[float] = None, + ) -> ChatCompletionOutput: ... + + @overload + def chat_completion( # type: ignore + self, + messages: List[Dict[str, str]], + *, + model: Optional[str] = None, + stream: Literal[True] = True, + max_tokens: int = 20, + seed: Optional[int] = None, + stop: Optional[Union[List[str], str]] = None, + temperature: float = 1.0, + top_p: Optional[float] = None, + ) -> Iterable[ChatCompletionStreamOutput]: ... + + @overload + def chat_completion( + self, + messages: List[Dict[str, str]], + *, + model: Optional[str] = None, + stream: bool = False, + max_tokens: int = 20, + seed: Optional[int] = None, + stop: Optional[Union[List[str], str]] = None, + temperature: float = 1.0, + top_p: Optional[float] = None, + ) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: ... + + def chat_completion( + self, + messages: List[Dict[str, str]], + *, + model: Optional[str] = None, + stream: bool = False, + max_tokens: int = 20, + seed: Optional[int] = None, + stop: Optional[Union[List[str], str]] = None, + temperature: float = 1.0, + top_p: Optional[float] = None, + ) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: + """ + A method for completing conversations using a specified language model. + + + + If the model is served by a server supporting chat-completion, the method will directly call the server's + `/v1/chat/completions` endpoint. If the server does not support chat-completion, the method will render the + chat template client-side based on the information fetched from the Hub API. In this case, you will need to + have `minijinja` template engine installed. Run `pip install "huggingface_hub[inference]"` or `pip install minijinja` + to install it. + + + + Args: + messages (List[Union[`SystemMessage`, `UserMessage`, `AssistantMessage`]]): + Conversation history consisting of roles and content pairs. + model (`str`, *optional*): + The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used. + See https://huggingface.co/tasks/text-generation for more details. + frequency_penalty (`float`, optional): + Penalizes new tokens based on their existing frequency + in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0. + max_tokens (`int`, optional): + Maximum number of tokens allowed in the response. Defaults to 20. + seed (Optional[`int`], optional): + Seed for reproducible control flow. Defaults to None. + stop (Optional[`str`], optional): + Up to four strings which trigger the end of the response. + Defaults to None. + stream (`bool`, optional): + Enable realtime streaming of responses. Defaults to False. + temperature (`float`, optional): + Controls randomness of the generations. Lower values ensure + less random completions. 
Range: [0, 2]. Defaults to 1.0. + top_p (`float`, optional): + Fraction of the most likely next words to sample from. + Must be between 0 and 1. Defaults to 1.0. + + Returns: + `Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]`: + Generated text returned from the server: + - if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default). + - if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`]. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> messages = [{"role": "user", "content": "What is the capital of France?"}] + >>> client = InferenceClient("HuggingFaceH4/zephyr-7b-beta") + >>> client.chat_completion(messages, max_tokens=100) + ChatCompletionOutput( + choices=[ + ChatCompletionOutputChoice( + finish_reason='eos_token', + index=0, + message=ChatCompletionOutputChoiceMessage( + content='The capital of France is Paris. The official name of the city is "Ville de Paris" (City of Paris) and the name of the country\'s governing body, which is located in Paris, is "La République française" (The French Republic). \nI hope that helps! Let me know if you need any further information.' + ) + ) + ], + created=1710498360 + ) + + >>> for token in client.chat_completion(messages, max_tokens=10, stream=True): + ... print(token) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content='The', role='assistant'), index=0, finish_reason=None)], created=1710498504) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' capital', role='assistant'), index=0, finish_reason=None)], created=1710498504) + (...) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' may', role='assistant'), index=0, finish_reason=None)], created=1710498504) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=None, role=None), index=0, finish_reason='length')], created=1710498504) + ``` + """ + # determine model + model = model or self.model or self.get_recommended_model("text-generation") + + if _is_chat_completion_server(model): + # First, let's consider the server has a `/v1/chat/completions` endpoint. + # If that's the case, we don't have to render the chat template client-side. + model_url = self._resolve_url(model) + if not model_url.endswith("/chat/completions"): + model_url += "/v1/chat/completions" + + try: + data = self.post( + model=model_url, + json=dict( + model="tgi", # random string + messages=messages, + max_tokens=max_tokens, + seed=seed, + stop=stop, + temperature=temperature, + top_p=top_p, + stream=stream, + ), + stream=stream, + ) + except HTTPError: + # Let's consider the server is not a chat completion server. + # Then we call again `chat_completion` which will render the chat template client side. 
+ # (can be HTTP 500, HTTP 400, HTTP 404 depending on the server) + _set_as_non_chat_completion_server(model) + return self.chat_completion( + messages=messages, + model=model, + stream=stream, + max_tokens=max_tokens, + seed=seed, + stop=stop, + temperature=temperature, + top_p=top_p, + ) + + if stream: + return _stream_chat_completion_response_from_bytes(data) # type: ignore[arg-type] + + return ChatCompletionOutput.parse_obj_as_instance(data) # type: ignore[arg-type] + + # At this point, we know the server is not a chat completion server. + # We need to render the chat template client side based on the information we can fetch from + # the Hub API. + + model_id = None + if model.startswith(("http://", "https://")): + # If URL, we need to know which model is served. This is not always possible. + # A workaround is to list the user Inference Endpoints and check if one of them correspond to the model URL. + # If not, we raise an error. + # TODO: fix when we have a proper API for this (at least for Inference Endpoints) + # TODO: what if Sagemaker URL? + # TODO: what if Azure URL? + from ..hf_api import HfApi + + for endpoint in HfApi(token=self.token).list_inference_endpoints(): + if endpoint.url == model: + model_id = endpoint.repository + break + else: + model_id = model + + if model_id is None: + # If we don't have the model ID, we can't fetch the chat template. + # We raise an error. + raise ValueError( + "Request can't be processed as the model ID can't be inferred from model URL. " + "This is needed to fetch the chat template from the Hub since the model is not " + "served with a Chat-completion API." + ) + + # fetch chat template + tokens + prompt = render_chat_prompt(model_id=model_id, token=self.token, messages=messages) + + # generate response + stop_sequences = [stop] if isinstance(stop, str) else stop + text_generation_output = self.text_generation( + prompt=prompt, + details=True, + stream=stream, + model=model, + max_new_tokens=max_tokens, + seed=seed, + stop_sequences=stop_sequences, + temperature=temperature, + top_p=top_p, + ) + + created = int(time.time()) + + if stream: + return _stream_chat_completion_response_from_text_generation(text_generation_output) # type: ignore [arg-type] + + if isinstance(text_generation_output, TextGenerationOutput): + # General use case => format ChatCompletionOutput from text generation details + content: str = text_generation_output.generated_text + finish_reason: str = text_generation_output.details.finish_reason # type: ignore[union-attr] + else: + # Corner case: if server doesn't support details (e.g. if not a TGI server), we only receive an output string. + # In such a case, `finish_reason` is set to `"unk"`. + content = text_generation_output # type: ignore[assignment] + finish_reason = "unk" + + return ChatCompletionOutput( + created=created, + choices=[ + ChatCompletionOutputChoice( + finish_reason=finish_reason, # type: ignore + index=0, + message=ChatCompletionOutputChoiceMessage( + content=content, + role="assistant", + ), + ) + ], + ) + + def conversational( + self, + text: str, + generated_responses: Optional[List[str]] = None, + past_user_inputs: Optional[List[str]] = None, + *, + parameters: Optional[Dict[str, Any]] = None, + model: Optional[str] = None, + ) -> ConversationalOutput: + """ + Generate conversational responses based on the given input text (i.e. chat with the API). + + + + [`InferenceClient.conversational`] API is deprecated and will be removed in a future release. 
Please use + [`InferenceClient.chat_completion`] instead. + + + + Args: + text (`str`): + The last input from the user in the conversation. + generated_responses (`List[str]`, *optional*): + A list of strings corresponding to the earlier replies from the model. Defaults to None. + past_user_inputs (`List[str]`, *optional*): + A list of strings corresponding to the earlier replies from the user. Should be the same length as + `generated_responses`. Defaults to None. + parameters (`Dict[str, Any]`, *optional*): + Additional parameters for the conversational task. Defaults to None. For more details about the available + parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#conversational-task) + model (`str`, *optional*): + The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used. + Defaults to None. + + Returns: + `Dict`: The generated conversational output. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> output = client.conversational("Hi, who are you?") + >>> output + {'generated_text': 'I am the one who knocks.', 'conversation': {'generated_responses': ['I am the one who knocks.'], 'past_user_inputs': ['Hi, who are you?']}, 'warnings': ['Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.']} + >>> client.conversational( + ... "Wow, that's scary!", + ... generated_responses=output["conversation"]["generated_responses"], + ... past_user_inputs=output["conversation"]["past_user_inputs"], + ... ) + ``` + """ + warnings.warn( + "'InferenceClient.conversational' is deprecated and will be removed starting from huggingface_hub>=0.25. " + "Please use the more appropriate 'InferenceClient.chat_completion' API instead.", + FutureWarning, + ) + payload: Dict[str, Any] = {"inputs": {"text": text}} + if generated_responses is not None: + payload["inputs"]["generated_responses"] = generated_responses + if past_user_inputs is not None: + payload["inputs"]["past_user_inputs"] = past_user_inputs + if parameters is not None: + payload["parameters"] = parameters + response = self.post(json=payload, model=model, task="conversational") + return _bytes_to_dict(response) # type: ignore + + def document_question_answering( + self, + image: ContentT, + question: str, + *, + model: Optional[str] = None, + ) -> List[DocumentQuestionAnsweringOutputElement]: + """ + Answer questions on document images. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image for the context. It can be raw bytes, an image file, or a URL to an online image. + question (`str`): + Question to be answered. + model (`str`, *optional*): + The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used. + Defaults to None. + + Returns: + `List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number. 
+ + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?") + [DocumentQuestionAnsweringOutputElement(score=0.42515629529953003, answer='us-001', start=16, end=16)] + ``` + """ + payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)} + response = self.post(json=payload, model=model, task="document-question-answering") + return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response) + + def feature_extraction(self, text: str, *, model: Optional[str] = None) -> "np.ndarray": + """ + Generate embeddings for a given text. + + Args: + text (`str`): + The text to embed. + model (`str`, *optional*): + The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used. + Defaults to None. + + Returns: + `np.ndarray`: The embedding representing the input text as a float32 numpy array. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.feature_extraction("Hi, who are you?") + array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ], + [-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ], + ..., + [ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32) + ``` + """ + response = self.post(json={"inputs": text}, model=model, task="feature-extraction") + np = _import_numpy() + return np.array(_bytes_to_dict(response), dtype="float32") + + def fill_mask(self, text: str, *, model: Optional[str] = None) -> List[FillMaskOutputElement]: + """ + Fill in a hole with a missing word (token to be precise). + + Args: + text (`str`): + a string to be filled from, must contain the [MASK] token (check model card for exact name of the mask). + model (`str`, *optional*): + The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used. + Defaults to None. + + Returns: + `List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated + probability, token reference, and completed text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.fill_mask("The goal of life is <mask>.")
+        [
+            FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'),
+            FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.')
+        ]
+        ```
+        """
+        response = self.post(json={"inputs": text}, model=model, task="fill-mask")
+        return FillMaskOutputElement.parse_obj_as_list(response)
+
+    def image_classification(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> List[ImageClassificationOutputElement]:
+        """
+        Perform image classification on the given image using the specified model.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The image to classify. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a
+                deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used.
+
+        Returns:
+            `List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
+        [ImageClassificationOutputElement(score=0.9779096841812134, label='Blenheim spaniel'), ...]
+        ```
+        """
+        response = self.post(data=image, model=model, task="image-classification")
+        return ImageClassificationOutputElement.parse_obj_as_list(response)
+
+    def image_segmentation(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> List[ImageSegmentationOutputElement]:
+        """
+        Perform image segmentation on the given image using the specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The image to segment. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
+                deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
+
+        Returns:
+            `List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.image_segmentation("cat.jpg")
+        [ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=<PIL.PngImagePlugin.PngImageFile image mode=L size=400x300>), ...]
+ ``` + """ + response = self.post(data=image, model=model, task="image-segmentation") + output = ImageSegmentationOutputElement.parse_obj_as_list(response) + for item in output: + item.mask = _b64_to_image(item.mask) + return output + + def image_to_image( + self, + image: ContentT, + prompt: Optional[str] = None, + *, + negative_prompt: Optional[str] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + model: Optional[str] = None, + **kwargs, + ) -> "Image": + """ + Perform image-to-image translation using a specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image for translation. It can be raw bytes, an image file, or a URL to an online image. + prompt (`str`, *optional*): + The text prompt to guide the image generation. + negative_prompt (`str`, *optional*): + A negative prompt to guide the translation process. + height (`int`, *optional*): + The height in pixels of the generated image. + width (`int`, *optional*): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*): + Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `Image`: The translated image. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> image = client.image_to_image("cat.jpg", prompt="turn the cat into a tiger") + >>> image.save("tiger.jpg") + ``` + """ + parameters = { + "prompt": prompt, + "negative_prompt": negative_prompt, + "height": height, + "width": width, + "num_inference_steps": num_inference_steps, + "guidance_scale": guidance_scale, + **kwargs, + } + if all(parameter is None for parameter in parameters.values()): + # Either only an image to send => send as raw bytes + data = image + payload: Optional[Dict[str, Any]] = None + else: + # Or an image + some parameters => use base64 encoding + data = None + payload = {"inputs": _b64_encode(image)} + for key, value in parameters.items(): + if value is not None: + payload.setdefault("parameters", {})[key] = value + + response = self.post(json=payload, data=data, model=model, task="image-to-image") + return _bytes_to_image(response) + + def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput: + """ + Takes an input image and return text. + + Models can have very different outputs depending on your use case (image captioning, optical character recognition + (OCR), Pix2Struct, etc). Please have a look to the model card to learn more about a model's specificities. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image to caption. 
+                It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+
+        Returns:
+            [`ImageToTextOutput`]: The generated text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.image_to_text("cat.jpg")
+        'a cat standing in a grassy field '
+        >>> client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
+        'a dog laying on the grass next to a flower pot '
+        ```
+        """
+        response = self.post(data=image, model=model, task="image-to-text")
+        return ImageToTextOutput.parse_obj_as_instance(response)
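Editor's note: the `list_deployed_models` helper below pairs naturally with `get_model_status`, which its docstring recommends for availability checks. A minimal sketch of that discoverability flow (model IDs are whatever the API returns; it assumes the `ModelStatus` fields imported from `huggingface_hub.inference._common`):

```py
from huggingface_hub import InferenceClient

client = InferenceClient()

# Discover which text-generation models are currently deployed...
deployed = client.list_deployed_models("text-generation-inference")
candidates = deployed.get("text-generation", [])[:3]

# ...then check a specific model before committing to it.
for model_id in candidates:
    status = client.get_model_status(model_id)  # returns a ModelStatus
    print(model_id, status.state, status.loaded)
```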
+
+    def list_deployed_models(
+        self, frameworks: Union[None, str, Literal["all"], List[str]] = None
+    ) -> Dict[str, List[str]]:
+        """
+        List models currently deployed on the Inference API service.
+
+        This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that
+        are supported and account for 95% of the hosted models. However, if you want a complete list of models you can
+        specify `frameworks="all"` as input. Alternatively, if you know beforehand which framework you are interested
+        in, you can also restrict the search to this one (e.g. `frameworks="text-generation-inference"`). The more
+        frameworks are checked, the more time it will take.
+
+        <Tip>
+
+        This endpoint is mostly useful for discoverability. If you already know which model you want to use and want to
+        check its availability, you can directly use [`~InferenceClient.get_model_status`].
+
+        </Tip>
+
+        Args:
+            frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
+                The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
+                "all", all available frameworks will be tested. It is also possible to provide a single framework or a
+                custom set of frameworks to check.
+
+        Returns:
+            `Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs.
+
+        Example:
+        ```python
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+
+        # Discover zero-shot-classification models currently deployed
+        >>> models = client.list_deployed_models()
+        >>> models["zero-shot-classification"]
+        ['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...]
+
+        # List from only 1 framework
+        >>> client.list_deployed_models("text-generation-inference")
+        {'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...}
+        ```
+        """
+        # Resolve which frameworks to check
+        if frameworks is None:
+            frameworks = MAIN_INFERENCE_API_FRAMEWORKS
+        elif frameworks == "all":
+            frameworks = ALL_INFERENCE_API_FRAMEWORKS
+        elif isinstance(frameworks, str):
+            frameworks = [frameworks]
+        frameworks = list(set(frameworks))
+
+        # Fetch them iteratively
+        models_by_task: Dict[str, List[str]] = {}
+
+        def _unpack_response(framework: str, items: List[Dict]) -> None:
+            for model in items:
+                if framework == "sentence-transformers":
+                    # Model running with the `sentence-transformers` framework can work with both tasks even if not
+                    # branded as such in the API response
+                    models_by_task.setdefault("feature-extraction", []).append(model["model_id"])
+                    models_by_task.setdefault("sentence-similarity", []).append(model["model_id"])
+                else:
+                    models_by_task.setdefault(model["task"], []).append(model["model_id"])
+
+        for framework in frameworks:
+            response = get_session().get(f"{INFERENCE_ENDPOINT}/framework/{framework}", headers=self.headers)
+            hf_raise_for_status(response)
+            _unpack_response(framework, response.json())
+
+        # Sort alphabetically for discoverability and return
+        for task, models in models_by_task.items():
+            models_by_task[task] = sorted(set(models), key=lambda x: x.lower())
+        return models_by_task
+
+    def object_detection(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+    ) -> List[ObjectDetectionOutputElement]:
+        """
+        Perform object detection on the given image using the specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The image to detect objects on. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a
+                deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used.
+
+        Returns:
+            `List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+            `ValueError`:
+                If the request output is not a List.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.object_detection("people.jpg")
+        [ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...]
+        ```
+        """
+        # detect objects
+        response = self.post(data=image, model=model, task="object-detection")
+        return ObjectDetectionOutputElement.parse_obj_as_list(response)
+
+    def question_answering(
+        self, question: str, context: str, *, model: Optional[str] = None
+    ) -> QuestionAnsweringOutputElement:
+        """
+        Retrieve the answer to a question from a given text.
+
+        Args:
+            question (`str`):
+                Question to be answered.
+            context (`str`):
+                The context of the question.
+            model (`str`, *optional*):
+                The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint.
+
+        Returns:
+            [`QuestionAnsweringOutputElement`]: a question answering output containing the score, start index, end index, and answer.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.")
+        QuestionAnsweringOutputElement(score=0.9326562285423279, start=11, end=16, answer='Clara')
+        ```
+        """
+
+        payload: Dict[str, Any] = {"question": question, "context": context}
+        response = self.post(
+            json=payload,
+            model=model,
+            task="question-answering",
+        )
+        return QuestionAnsweringOutputElement.parse_obj_as_instance(response)
+
+    def sentence_similarity(
+        self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
+    ) -> List[float]:
+        """
+        Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
+
+        Args:
+            sentence (`str`):
+                The main sentence to compare to others.
+            other_sentences (`List[str]`):
+                The list of sentences to compare to.
+            model (`str`, *optional*):
+                The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used.
+                Defaults to None.
+
+        Returns:
+            `List[float]`: The similarity scores between the main sentence and each of the other sentences.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.sentence_similarity(
+        ...     "Machine learning is so easy.",
+        ...     other_sentences=[
+        ...         "Deep learning is so straightforward.",
+        ...         "This is so difficult, like rocket science.",
+        ...         "I can't believe how much I struggled with this.",
+        ...     ],
+        ... )
+        [0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
+        ```
+        """
+        response = self.post(
+            json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}},
+            model=model,
+            task="sentence-similarity",
+        )
+        return _bytes_to_list(response)
+
+    def summarization(
+        self,
+        text: str,
+        *,
+        parameters: Optional[Dict[str, Any]] = None,
+        model: Optional[str] = None,
+    ) -> SummarizationOutput:
+        """
+        Generate a summary of a given text using a specified model.
+
+        Args:
+            text (`str`):
+                The input text to summarize.
+            parameters (`Dict[str, Any]`, *optional*):
+                Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task)
+                for more details.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+
+        Returns:
+            [`SummarizationOutput`]: The generated summary text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.summarization("The Eiffel tower...") + SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....") + ``` + """ + payload: Dict[str, Any] = {"inputs": text} + if parameters is not None: + payload["parameters"] = parameters + response = self.post(json=payload, model=model, task="summarization") + return SummarizationOutput.parse_obj_as_list(response)[0] + + def table_question_answering( + self, table: Dict[str, Any], query: str, *, model: Optional[str] = None + ) -> TableQuestionAnsweringOutputElement: + """ + Retrieve the answer to a question from information given in a table. + + Args: + table (`str`): + A table of data represented as a dict of lists where entries are headers and the lists are all the + values, all lists must have the same size. + query (`str`): + The query in plain text that you want to ask the table. + model (`str`): + The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face + Hub or a URL to a deployed Inference Endpoint. + + Returns: + [`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> query = "How many stars does the transformers repository have?" + >>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]} + >>> client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq") + TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE') + ``` + """ + response = self.post( + json={ + "query": query, + "table": table, + }, + model=model, + task="table-question-answering", + ) + return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) + + def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]: + """ + Classifying a target category (a group) based on a set of attributes. + + Args: + table (`Dict[str, Any]`): + Set of attributes to classify. + model (`str`, *optional*): + The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used. + Defaults to None. + + Returns: + `List`: a list of labels, one per row in the initial table. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> table = { + ... "fixed_acidity": ["7.4", "7.8", "10.3"], + ... "volatile_acidity": ["0.7", "0.88", "0.32"], + ... "citric_acid": ["0", "0", "0.45"], + ... "residual_sugar": ["1.9", "2.6", "6.4"], + ... "chlorides": ["0.076", "0.098", "0.073"], + ... "free_sulfur_dioxide": ["11", "25", "5"], + ... "total_sulfur_dioxide": ["34", "67", "13"], + ... "density": ["0.9978", "0.9968", "0.9976"], + ... 
"pH": ["3.51", "3.2", "3.23"], + ... "sulphates": ["0.56", "0.68", "0.82"], + ... "alcohol": ["9.4", "9.8", "12.6"], + ... } + >>> client.tabular_classification(table=table, model="julien-c/wine-quality") + ["5", "5", "5"] + ``` + """ + response = self.post(json={"table": table}, model=model, task="tabular-classification") + return _bytes_to_list(response) + + def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]: + """ + Predicting a numerical target value given a set of attributes/features in a table. + + Args: + table (`Dict[str, Any]`): + Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical. + model (`str`, *optional*): + The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used. + Defaults to None. + + Returns: + `List`: a list of predicted numerical target values. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> table = { + ... "Height": ["11.52", "12.48", "12.3778"], + ... "Length1": ["23.2", "24", "23.9"], + ... "Length2": ["25.4", "26.3", "26.5"], + ... "Length3": ["30", "31.2", "31.1"], + ... "Species": ["Bream", "Bream", "Bream"], + ... "Width": ["4.02", "4.3056", "4.6961"], + ... } + >>> client.tabular_regression(table, model="scikit-learn/Fish-Weight") + [110, 120, 130] + ``` + """ + response = self.post(json={"table": table}, model=model, task="tabular-regression") + return _bytes_to_list(response) + + def text_classification(self, text: str, *, model: Optional[str] = None) -> List[TextClassificationOutputElement]: + """ + Perform text classification (e.g. sentiment-analysis) on the given text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used. + Defaults to None. + + Returns: + `List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.text_classification("I like you") + [ + TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314), + TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069), + ] + ``` + """ + response = self.post(json={"inputs": text}, model=model, task="text-classification") + return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value] + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + do_sample: bool = False, + max_new_tokens: int = 20, + best_of: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: bool = False, + seed: Optional[int] = None, + stop_sequences: Optional[List[str]] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: bool = False, + ) -> str: ... + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + do_sample: bool = False, + max_new_tokens: int = 20, + best_of: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: bool = False, + seed: Optional[int] = None, + stop_sequences: Optional[List[str]] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: bool = False, + ) -> TextGenerationOutput: ... + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + do_sample: bool = False, + max_new_tokens: int = 20, + best_of: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: bool = False, + seed: Optional[int] = None, + stop_sequences: Optional[List[str]] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: bool = False, + ) -> Iterable[str]: ... + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + do_sample: bool = False, + max_new_tokens: int = 20, + best_of: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: bool = False, + seed: Optional[int] = None, + stop_sequences: Optional[List[str]] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: bool = False, + ) -> Iterable[TextGenerationStreamOutput]: ... 
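+
+    # The four overloads above map the (details, stream) flag combinations to return types:
+    #   details=False, stream=False -> str
+    #   details=True,  stream=False -> TextGenerationOutput
+    #   details=False, stream=True  -> Iterable[str]
+    #   details=True,  stream=True  -> Iterable[TextGenerationStreamOutput]
+    # The overload below handles `details=True` when `stream` is not statically known.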
+
+    @overload
+    def text_generation(
+        self,
+        prompt: str,
+        *,
+        details: Literal[True] = ...,
+        stream: bool = ...,
+        model: Optional[str] = None,
+        do_sample: bool = False,
+        max_new_tokens: int = 20,
+        best_of: Optional[int] = None,
+        repetition_penalty: Optional[float] = None,
+        return_full_text: bool = False,
+        seed: Optional[int] = None,
+        stop_sequences: Optional[List[str]] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        truncate: Optional[int] = None,
+        typical_p: Optional[float] = None,
+        watermark: bool = False,
+    ) -> Union[TextGenerationOutput, Iterable[TextGenerationStreamOutput]]: ...
+
+    def text_generation(
+        self,
+        prompt: str,
+        *,
+        details: bool = False,
+        stream: bool = False,
+        model: Optional[str] = None,
+        do_sample: bool = False,
+        max_new_tokens: int = 20,
+        best_of: Optional[int] = None,
+        repetition_penalty: Optional[float] = None,
+        return_full_text: bool = False,
+        seed: Optional[int] = None,
+        stop_sequences: Optional[List[str]] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        truncate: Optional[int] = None,
+        typical_p: Optional[float] = None,
+        watermark: bool = False,
+        decoder_input_details: bool = False,
+    ) -> Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]:
+        """
+        Given a prompt, generate the following text.
+
+        The API endpoint is supposed to run with the `text-generation-inference` backend (TGI). This backend is the
+        go-to solution to run large language models at scale. However, for some smaller models (e.g. "gpt2") the
+        default `transformers` + `api-inference` solution is still in use. Both approaches have very similar APIs, but
+        not exactly the same ones. This method is compatible with both approaches but some parameters are only available for
+        `text-generation-inference`. If some parameters are ignored, a warning message is triggered but the process
+        continues correctly.
+
+        To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference.
+
+        Args:
+            prompt (`str`):
+                Input text.
+            details (`bool`, *optional*):
+                By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
+                probabilities, seed, finish reason, etc.). Only available for models running with the
+                `text-generation-inference` backend.
+            stream (`bool`, *optional*):
+                By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
+                tokens to be returned. Only available for models running with the `text-generation-inference`
+                backend.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+            do_sample (`bool`):
+                Activate logits sampling.
+            max_new_tokens (`int`):
+                Maximum number of generated tokens.
+            best_of (`int`):
+                Generate `best_of` sequences and return the one with the highest token logprobs.
+            repetition_penalty (`float`):
+                The parameter for repetition penalty. 1.0 means no penalty. See [this
+                paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+            return_full_text (`bool`):
+                Whether to prepend the prompt to the generated text.
+            seed (`int`):
+                Random sampling seed.
+            stop_sequences (`List[str]`):
+                Stop generating tokens if a member of `stop_sequences` is generated.
+            temperature (`float`):
+                The value used to modulate the logits distribution.
+            top_k (`int`):
+                The number of highest probability vocabulary tokens to keep for top-k-filtering.
+            top_p (`float`):
+                If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+                higher are kept for generation.
+            truncate (`int`):
+                Truncate input tokens to the given size.
+            typical_p (`float`):
+                Typical Decoding mass.
+                See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
+            watermark (`bool`):
+                Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
+            decoder_input_details (`bool`):
+                Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
+                into account. Defaults to `False`.
+
+        Returns:
+            `Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]`:
+            Generated text returned from the server:
+            - if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
+            - if `stream=True` and `details=False`, the generated text is returned token by token as an `Iterable[str]`
+            - if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.TextGenerationOutput`]
+            - if `details=True` and `stream=True`, the generated text is returned token by token as an iterable of [`~huggingface_hub.TextGenerationStreamOutput`]
+
+        Raises:
+            `ValidationError`:
+                If input values are not valid. No HTTP call is made to the server.
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+
+        # Case 1: generate text
+        >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
+        '100% open source and built to be easy to use.'
+
+        # Case 2: iterate over the generated tokens. Useful for large generation.
+        >>> for token in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
+        ...     print(token)
+        100
+        %
+        open
+        source
+        and
+        built
+        to
+        be
+        easy
+        to
+        use
+        .
+
+        # Case 3: get more details about the generation process.
+        >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
+        TextGenerationOutput(
+            generated_text='100% open source and built to be easy to use.',
+            details=TextGenerationDetails(
+                finish_reason='length',
+                generated_tokens=12,
+                seed=None,
+                prefill=[
+                    TextGenerationPrefillToken(id=487, text='The', logprob=None),
+                    TextGenerationPrefillToken(id=53789, text=' hugging', logprob=-13.171875),
+                    (...)
+                    TextGenerationPrefillToken(id=204, text=' ', logprob=-7.0390625)
+                ],
+                tokens=[
+                    TokenElement(id=1425, text='100', logprob=-1.0175781, special=False),
+                    TokenElement(id=16, text='%', logprob=-0.0463562, special=False),
+                    (...)
+                    TokenElement(id=25, text='.', logprob=-0.5703125, special=False)
+                ],
+                best_of_sequences=None
+            )
+        )
+
+        # Case 4: iterate over the generated tokens with more details.
+        # Last object is more complete, containing the full generated text and the finish reason.
+ >>> for details in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True): + ... print(details) + ... + TextGenerationStreamOutput(token=TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement( + id=25, + text='.', + logprob=-0.5703125, + special=False), + generated_text='100% open source and built to be easy to use.', + details=TextGenerationStreamDetails(finish_reason='length', generated_tokens=12, seed=None) + ) + ``` + """ + if decoder_input_details and not details: + warnings.warn( + "`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that" + " the output from the server will be truncated." + ) + decoder_input_details = False + + # Build payload + payload = { + "inputs": prompt, + "parameters": { + "best_of": best_of, + "decoder_input_details": decoder_input_details, + "details": details, + "do_sample": do_sample, + "max_new_tokens": max_new_tokens, + "repetition_penalty": repetition_penalty, + "return_full_text": return_full_text, + "seed": seed, + "stop": stop_sequences if stop_sequences is not None else [], + "temperature": temperature, + "top_k": top_k, + "top_p": top_p, + "truncate": truncate, + "typical_p": typical_p, + "watermark": watermark, + }, + "stream": stream, + } + + # Remove some parameters if not a TGI server + if not _is_tgi_server(model): + parameters: Dict = payload["parameters"] # type: ignore [assignment] + + ignored_parameters = [] + for key in "watermark", "details", "decoder_input_details", "best_of", "stop", "return_full_text": + if parameters[key] is not None: + ignored_parameters.append(key) + del parameters[key] + if len(ignored_parameters) > 0: + warnings.warn( + "API endpoint/model for text-generation is not served via TGI. Ignoring parameters" + f" {ignored_parameters}.", + UserWarning, + ) + if details: + warnings.warn( + "API endpoint/model for text-generation is not served via TGI. 
Parameter `details=True` will"
+                    " be ignored, meaning only the generated text will be returned.",
+                    UserWarning,
+                )
+                details = False
+            if stream:
+                raise ValueError(
+                    "API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
+                    " Please pass `stream=False` as input."
+                )
+
+        # Handle errors separately for more precise error messages
+        try:
+            bytes_output = self.post(json=payload, model=model, task="text-generation", stream=stream)  # type: ignore
+        except HTTPError as e:
+            if isinstance(e, BadRequestError) and "The following `model_kwargs` are not used by the model" in str(e):
+                _set_as_non_tgi(model)
+                return self.text_generation(  # type: ignore
+                    prompt=prompt,
+                    details=details,
+                    stream=stream,
+                    model=model,
+                    do_sample=do_sample,
+                    max_new_tokens=max_new_tokens,
+                    best_of=best_of,
+                    repetition_penalty=repetition_penalty,
+                    return_full_text=return_full_text,
+                    seed=seed,
+                    stop_sequences=stop_sequences,
+                    temperature=temperature,
+                    top_k=top_k,
+                    top_p=top_p,
+                    truncate=truncate,
+                    typical_p=typical_p,
+                    watermark=watermark,
+                    decoder_input_details=decoder_input_details,
+                )
+            raise_text_generation_error(e)
+
+        # Parse output
+        if stream:
+            return _stream_text_generation_response(bytes_output, details)  # type: ignore
+
+        data = _bytes_to_dict(bytes_output)[0]  # type: ignore[arg-type]
+        return TextGenerationOutput.parse_obj_as_instance(data) if details else data["generated_text"]
+
+    def text_to_image(
+        self,
+        prompt: str,
+        *,
+        negative_prompt: Optional[str] = None,
+        height: Optional[float] = None,
+        width: Optional[float] = None,
+        num_inference_steps: Optional[float] = None,
+        guidance_scale: Optional[float] = None,
+        model: Optional[str] = None,
+        **kwargs,
+    ) -> "Image":
+        """
+        Generate an image based on a given text using a specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            prompt (`str`):
+                The prompt to generate an image from.
+            negative_prompt (`str`, *optional*):
+                An optional negative prompt for the image generation.
+            height (`float`, *optional*):
+                The height in pixels of the image to generate.
+            width (`float`, *optional*):
+                The width in pixels of the image to generate.
+            num_inference_steps (`int`, *optional*):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*):
+                A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+
+        Returns:
+            `Image`: The generated image.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+
+        >>> image = client.text_to_image("An astronaut riding a horse on the moon.")
+        >>> image.save("astronaut.png")
+
+        >>> image = client.text_to_image(
+        ...     "An astronaut riding a horse on the moon.",
+        ...     negative_prompt="low resolution, blurry",
+        ...     model="stabilityai/stable-diffusion-2-1",
+        ...
) + >>> image.save("better_astronaut.png") + ``` + """ + payload = {"inputs": prompt} + parameters = { + "negative_prompt": negative_prompt, + "height": height, + "width": width, + "num_inference_steps": num_inference_steps, + "guidance_scale": guidance_scale, + **kwargs, + } + for key, value in parameters.items(): + if value is not None: + payload.setdefault("parameters", {})[key] = value # type: ignore + response = self.post(json=payload, model=model, task="text-to-image") + return _bytes_to_image(response) + + def text_to_speech(self, text: str, *, model: Optional[str] = None) -> bytes: + """ + Synthesize an audio of a voice pronouncing a given text. + + Args: + text (`str`): + The text to synthesize. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `bytes`: The generated audio. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from pathlib import Path + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + >>> audio = client.text_to_speech("Hello world") + >>> Path("hello_world.flac").write_bytes(audio) + ``` + """ + return self.post(json={"inputs": text}, model=model, task="text-to-speech") + + def token_classification( + self, text: str, *, model: Optional[str] = None + ) -> List[TokenClassificationOutputElement]: + """ + Perform token classification on the given text. + Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the token classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended token classification model will be used. + Defaults to None. + + Returns: + `List[TokenClassificationOutputElement]`: List of [`TokenClassificationOutputElement`] items containing the entity group, confidence score, word, start and end index. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.token_classification("My name is Sarah Jessica Parker but you can call me Jessica") + [ + TokenClassificationOutputElement( + entity_group='PER', + score=0.9971321225166321, + word='Sarah Jessica Parker', + start=11, + end=31, + ), + TokenClassificationOutputElement( + entity_group='PER', + score=0.9773476123809814, + word='Jessica', + start=52, + end=59, + ) + ] + ``` + """ + payload: Dict[str, Any] = {"inputs": text} + response = self.post( + json=payload, + model=model, + task="token-classification", + ) + return TokenClassificationOutputElement.parse_obj_as_list(response) + + def translation( + self, text: str, *, model: Optional[str] = None, src_lang: Optional[str] = None, tgt_lang: Optional[str] = None + ) -> TranslationOutput: + """ + Convert text from one language to another. 
+
+        Check out https://huggingface.co/tasks/translation for more information on how to choose the best model for
+        your specific use case. Source and target languages usually depend on the model.
+        However, it is possible to specify source and target languages for certain models. If you are working with one of these models,
+        you can use the `src_lang` and `tgt_lang` arguments to pass the relevant information.
+        You can find this information in the model card.
+
+        Args:
+            text (`str`):
+                A string to be translated.
+            model (`str`, *optional*):
+                The model to use for the translation task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended translation model will be used.
+                Defaults to None.
+            src_lang (`str`, *optional*):
+                Source language of the translation task, i.e. input language. Cannot be passed without `tgt_lang`.
+            tgt_lang (`str`, *optional*):
+                Target language of the translation task, i.e. output language. Cannot be passed without `src_lang`.
+
+        Returns:
+            [`TranslationOutput`]: The generated translated text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+            `ValueError`:
+                If only one of the `src_lang` and `tgt_lang` arguments is provided.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.translation("My name is Wolfgang and I live in Berlin")
+        'Mein Name ist Wolfgang und ich lebe in Berlin.'
+        >>> client.translation("My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr")
+        TranslationOutput(translation_text='Je m\'appelle Wolfgang et je vis à Berlin.')
+        ```
+
+        Specifying languages:
+        ```py
+        >>> client.translation("My name is Sarah Jessica Parker but you can call me Jessica", model="facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="fr_XX")
+        "Mon nom est Sarah Jessica Parker mais vous pouvez m\'appeler Jessica"
+        ```
+        """
+        # Throw error if only one of `src_lang` and `tgt_lang` was given
+        if src_lang is not None and tgt_lang is None:
+            raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.")
+
+        if src_lang is None and tgt_lang is not None:
+            raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.")
+
+        # If both `src_lang` and `tgt_lang` are given, pass them to the request body
+        payload: Dict = {"inputs": text}
+        if src_lang and tgt_lang:
+            payload["parameters"] = {"src_lang": src_lang, "tgt_lang": tgt_lang}
+        response = self.post(json=payload, model=model, task="translation")
+        return TranslationOutput.parse_obj_as_list(response)[0]
+
+    def visual_question_answering(
+        self,
+        image: ContentT,
+        question: str,
+        *,
+        model: Optional[str] = None,
+    ) -> List[VisualQuestionAnsweringOutputElement]:
+        """
+        Answering open-ended questions based on an image.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
+            question (`str`):
+                Question to be answered.
+            model (`str`, *optional*):
+                The model to use for the visual question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended visual question answering model will be used.
+                Defaults to None.
+ + Returns: + `List[VisualQuestionAnsweringOutputElement]`: a list of [`VisualQuestionAnsweringOutputElement`] items containing the predicted label and associated probability. + + Raises: + `InferenceTimeoutError`: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.visual_question_answering( + ... image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg", + ... question="What is the animal doing?" + ... ) + [ + VisualQuestionAnsweringOutputElement(score=0.778609573841095, answer='laying down'), + VisualQuestionAnsweringOutputElement(score=0.6957435607910156, answer='sitting'), + ] + ``` + """ + payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)} + response = self.post(json=payload, model=model, task="visual-question-answering") + return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response) + + def zero_shot_classification( + self, text: str, labels: List[str], *, multi_label: bool = False, model: Optional[str] = None + ) -> List[ZeroShotClassificationOutputElement]: + """ + Provide as input a text and a set of candidate labels to classify the input text. + + Args: + text (`str`): + The input text to classify. + labels (`List[str]`): + List of string possible labels. There must be at least 2 labels. + multi_label (`bool`): + Boolean that is set to True if classes can overlap. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `List[ZeroShotClassificationOutputElement]`: List of [`ZeroShotClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> text = ( + ... "A new model offers an explanation for how the Galilean satellites formed around the solar system's" + ... "largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling" + ... " mysteries when he went for a run up a hill in Nice, France." + ... 
) + >>> labels = ["space & cosmos", "scientific discovery", "microbiology", "robots", "archeology"] + >>> client.zero_shot_classification(text, labels) + [ + ZeroShotClassificationOutputElement(label='scientific discovery', score=0.7961668968200684), + ZeroShotClassificationOutputElement(label='space & cosmos', score=0.18570658564567566), + ZeroShotClassificationOutputElement(label='microbiology', score=0.00730885099619627), + ZeroShotClassificationOutputElement(label='archeology', score=0.006258360575884581), + ZeroShotClassificationOutputElement(label='robots', score=0.004559356719255447), + ] + >>> client.zero_shot_classification(text, labels, multi_label=True) + [ + ZeroShotClassificationOutputElement(label='scientific discovery', score=0.9829297661781311), + ZeroShotClassificationOutputElement(label='space & cosmos', score=0.755190908908844), + ZeroShotClassificationOutputElement(label='microbiology', score=0.0005462635890580714), + ZeroShotClassificationOutputElement(label='archeology', score=0.00047131875180639327), + ZeroShotClassificationOutputElement(label='robots', score=0.00030448526376858354), + ] + ``` + """ + # Raise ValueError if input is less than 2 labels + if len(labels) < 2: + raise ValueError("You must specify at least 2 classes to compare.") + + response = self.post( + json={ + "inputs": text, + "parameters": { + "candidate_labels": ",".join(labels), + "multi_label": multi_label, + }, + }, + model=model, + task="zero-shot-classification", + ) + output = _bytes_to_dict(response) + return [ + ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score}) + for label, score in zip(output["labels"], output["scores"]) + ] + + def zero_shot_image_classification( + self, image: ContentT, labels: List[str], *, model: Optional[str] = None + ) -> List[ZeroShotImageClassificationOutputElement]: + """ + Provide input image and text labels to predict text labels for the image. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The input image to caption. It can be raw bytes, an image file, or a URL to an online image. + labels (`List[str]`): + List of string possible labels. There must be at least 2 labels. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `List[ZeroShotImageClassificationOutputElement]`: List of [`ZeroShotImageClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + >>> client.zero_shot_image_classification( + ... "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg", + ... labels=["dog", "cat", "horse"], + ... ) + [ZeroShotImageClassificationOutputElement(label='dog', score=0.956),...] 
+        ```
+        """
+        # Raise ValueError if input is fewer than 2 labels
+        if len(labels) < 2:
+            raise ValueError("You must specify at least 2 classes to compare.")
+
+        response = self.post(
+            json={"image": _b64_encode(image), "parameters": {"candidate_labels": ",".join(labels)}},
+            model=model,
+            task="zero-shot-image-classification",
+        )
+        return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)
+
+    def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
+        model = model or self.model
+
+        # If model is already a URL, ignore `task` and return directly
+        if model is not None and (model.startswith("http://") or model.startswith("https://")):
+            return model
+
+        # If no model but task is set => fetch the recommended one for this task
+        if model is None:
+            if task is None:
+                raise ValueError(
+                    "You must specify at least a model (repo_id or URL) or a task, either when instantiating"
+                    " `InferenceClient` or when making a request."
+                )
+            model = self.get_recommended_model(task)
+            logger.info(
+                f"Using recommended model {model} for task {task}. Note that it is"
+                f" encouraged to explicitly set `model='{model}'` as the recommended"
+                " models list might get updated without prior notice."
+            )
+
+        # Compute InferenceAPI url
+        return (
+            # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
+            f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
+            if task in ("feature-extraction", "sentence-similarity")
+            # Otherwise, we use the default endpoint
+            else f"{INFERENCE_ENDPOINT}/models/{model}"
+        )
+
+    @staticmethod
+    def get_recommended_model(task: str) -> str:
+        """
+        Get the model Hugging Face recommends for the input task.
+
+        Args:
+            task (`str`):
+                The Hugging Face task for which to get the recommended model.
+                All available tasks can be found [here](https://huggingface.co/tasks).
+
+        Returns:
+            `str`: Name of the model recommended for the input task.
+
+        Raises:
+            `ValueError`: If Hugging Face has no recommendation for the input task.
+        """
+        model = _fetch_recommended_models().get(task)
+        if model is None:
+            raise ValueError(
+                f"Task {task} has no recommended model. Please specify a model"
+                " explicitly. Visit https://huggingface.co/tasks for more info."
+            )
+        return model
+
+    def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
+        """
+        Get the status of a model hosted on the Inference API.
+
+        <Tip>
+
+        This endpoint is mostly useful when you already know which model you want to use and want to check its
+        availability. If you want to discover already deployed models, you should rather use [`~InferenceClient.list_deployed_models`].
+
+        </Tip>
+
+        Args:
+            model (`str`, *optional*):
+                Identifier of the model for which the status will be checked. If no model is provided,
+                the model associated with this instance of [`InferenceClient`] will be used. Only models served
+                through the Inference API can be checked, so the identifier cannot be a URL.
+
+        Returns:
+            [`ModelStatus`]: An instance of the ModelStatus dataclass, containing information
+            about the state of the model: load, state, compute type and framework.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> client.get_model_status("bigcode/starcoder")
+        ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference')
+        ```
+        """
+        model = model or self.model
+        if model is None:
+            raise ValueError("Model id not provided.")
+        if model.startswith("https://"):
+            raise NotImplementedError("Model status is only available for Inference API endpoints.")
+        url = f"{INFERENCE_ENDPOINT}/status/{model}"
+
+        response = get_session().get(url, headers=self.headers)
+        hf_raise_for_status(response)
+        response_data = response.json()
+
+        if "error" in response_data:
+            raise ValueError(response_data["error"])
+
+        return ModelStatus(
+            loaded=response_data["loaded"],
+            state=response_data["state"],
+            compute_type=response_data["compute_type"],
+            framework=response_data["framework"],
+        )
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_common.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..01ea32572dfe8561abb4ee211bb71a7acc2cd173
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_common.py
@@ -0,0 +1,482 @@
+# coding=utf-8
+# Copyright 2023-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities used by both the sync and async inference clients."""
+
+import base64
+import io
+import json
+import logging
+import time
+from contextlib import contextmanager
+from dataclasses import dataclass
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterable,
+    BinaryIO,
+    ContextManager,
+    Dict,
+    Generator,
+    Iterable,
+    List,
+    Literal,
+    NoReturn,
+    Optional,
+    Set,
+    Union,
+    overload,
+)
+
+from requests import HTTPError
+
+from huggingface_hub.errors import (
+    GenerationError,
+    IncompleteGenerationError,
+    OverloadedError,
+    TextGenerationError,
+    UnknownError,
+    ValidationError,
+)
+
+from ..constants import ENDPOINT
+from ..utils import (
+    build_hf_headers,
+    get_session,
+    hf_raise_for_status,
+    is_aiohttp_available,
+    is_numpy_available,
+    is_pillow_available,
+)
+from ._generated.types import (
+    ChatCompletionStreamOutput,
+    ChatCompletionStreamOutputChoice,
+    ChatCompletionStreamOutputDelta,
+    TextGenerationStreamOutput,
+)
+
+
+if TYPE_CHECKING:
+    from aiohttp import ClientResponse, ClientSession
+    from PIL import Image
+
+# TYPES
+UrlT = str
+PathT = Union[str, Path]
+BinaryT = Union[bytes, BinaryIO]
+ContentT = Union[BinaryT, PathT, UrlT]
+
+# Used to set an Accept: image/png header
+TASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}
+
+logger = logging.getLogger(__name__)
+
+
+# Dataclass for ModelStatus. We use this dataclass in the `get_model_status` function.
+@dataclass
+class ModelStatus:
+    """
+    This dataclass represents the model status in the Hugging Face Inference API.
+
+    Args:
+        loaded (`bool`):
+            If the model is currently loaded into Hugging Face's InferenceAPI. Models
+            are loaded on-demand, leading to the user's first request taking longer.
+            If a model is loaded, you can be assured that it is in a healthy state.
+        state (`str`):
+            The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.
+            If a model's state is 'Loadable', it's not too big and has a supported
+            backend. Loadable models are automatically loaded when the user first
+            requests inference on the endpoint. This means it is transparent for the
+            user to load a model, except that the first call takes longer to complete.
+        compute_type (`Dict`):
+            Information about the compute resource the model is using or will use, such as 'gpu' type and number of
+            replicas.
+        framework (`str`):
+            The name of the framework that the model was built with, such as 'transformers'
+            or 'text-generation-inference'.
+    """
+
+    loaded: bool
+    state: str
+    compute_type: Dict
+    framework: str
+
+
+## IMPORT UTILS
+
+
+def _import_aiohttp():
+    # Make sure `aiohttp` is installed on the machine.
+    if not is_aiohttp_available():
+        raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")
+    import aiohttp
+
+    return aiohttp
+
+
+def _import_numpy():
+    """Make sure `numpy` is installed on the machine."""
+    if not is_numpy_available():
+        raise ImportError("Please install numpy to deal with embeddings (`pip install numpy`).")
+    import numpy
+
+    return numpy
+
+
+def _import_pil_image():
+    """Make sure `PIL` is installed on the machine."""
+    if not is_pillow_available():
+        raise ImportError(
+            "Please install Pillow to deal with images (`pip install Pillow`). If you don't want the image to be"
+            " post-processed, use `client.post(...)` and get the raw response from the server."
+        )
+    from PIL import Image
+
+    return Image
+
+
+## RECOMMENDED MODELS
+
+# Will be globally fetched only once (see '_fetch_recommended_models')
+_RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None
+
+
+def _fetch_recommended_models() -> Dict[str, Optional[str]]:
+    global _RECOMMENDED_MODELS
+    if _RECOMMENDED_MODELS is None:
+        response = get_session().get(f"{ENDPOINT}/api/tasks", headers=build_hf_headers())
+        hf_raise_for_status(response)
+        _RECOMMENDED_MODELS = {
+            task: _first_or_none(details["widgetModels"]) for task, details in response.json().items()
+        }
+    return _RECOMMENDED_MODELS
+
+
+def _first_or_none(items: List[Any]) -> Optional[Any]:
+    try:
+        return items[0] or None
+    except IndexError:
+        return None
+
+
+## ENCODING / DECODING UTILS
+
+
+@overload
+def _open_as_binary(
+    content: ContentT,
+) -> ContextManager[BinaryT]: ...  # means "if input is not None, output is not None"
+
+
+@overload
+def _open_as_binary(
+    content: Literal[None],
+) -> ContextManager[Literal[None]]: ...  # means "if input is None, output is None"
+
+
+@contextmanager  # type: ignore
+def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]:
+    """Open `content` as a binary file, either from a URL, a local path, or raw bytes.
+
+    Do nothing if `content` is None.
+
+    TODO: handle a PIL.Image as input
+    TODO: handle base64 as input
+    """
+    # If content is a string => must be either a URL or a path
+    if isinstance(content, str):
+        if content.startswith("https://") or content.startswith("http://"):
+            logger.debug(f"Downloading content from {content}")
+            yield get_session().get(content).content  # TODO: retrieve as stream and pipe to post request ?
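+            # `return` ends the generator early: the URL branch has already yielded,
+            # so the local-path handling below must not run.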
+            return
+        content = Path(content)
+        if not content.exists():
+            raise FileNotFoundError(
+                f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local"
+                " file. To pass raw content, please encode it as bytes first."
+            )
+
+    # If content is a Path => open it
+    if isinstance(content, Path):
+        logger.debug(f"Opening content from {content}")
+        with content.open("rb") as f:
+            yield f
+    else:
+        # Otherwise: already a file-like object or None
+        yield content
+
+
+def _b64_encode(content: ContentT) -> str:
+    """Encode a raw file (image, audio) into base64. Can be bytes, an opened file, a path, or a URL."""
+    with _open_as_binary(content) as data:
+        data_as_bytes = data if isinstance(data, bytes) else data.read()
+        return base64.b64encode(data_as_bytes).decode()
+
+
+def _b64_to_image(encoded_image: str) -> "Image":
+    """Parse a base64-encoded string into a PIL Image."""
+    Image = _import_pil_image()
+    return Image.open(io.BytesIO(base64.b64decode(encoded_image)))
+
+
+def _bytes_to_list(content: bytes) -> List:
+    """Parse bytes from a Response object into a Python list.
+
+    Expects the response body to be JSON-encoded data.
+
+    NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a
+    dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+    """
+    return json.loads(content.decode())
+
+
+def _bytes_to_dict(content: bytes) -> Dict:
+    """Parse bytes from a Response object into a Python dictionary.
+
+    Expects the response body to be JSON-encoded data.
+
+    NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a
+    list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.
+    """
+    return json.loads(content.decode())
+
+
+def _bytes_to_image(content: bytes) -> "Image":
+    """Parse bytes from a Response object into a PIL Image.
+
+    Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead.
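+
+    Raises `ImportError` if Pillow is not installed (raised by `_import_pil_image`).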
+    """
+    Image = _import_pil_image()
+    return Image.open(io.BytesIO(content))
+
+
+## STREAMING UTILS
+
+
+def _stream_text_generation_response(
+    bytes_output_as_lines: Iterable[bytes], details: bool
+) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
+    """Used in `InferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    for byte_payload in bytes_output_as_lines:
+        output = _format_text_generation_stream_output(byte_payload, details)
+        if output is not None:
+            yield output
+
+
+async def _async_stream_text_generation_response(
+    bytes_output_as_lines: AsyncIterable[bytes], details: bool
+) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
+    """Used in `AsyncInferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    async for byte_payload in bytes_output_as_lines:
+        output = _format_text_generation_stream_output(byte_payload, details)
+        if output is not None:
+            yield output
+
+
+def _format_text_generation_stream_output(
+    byte_payload: bytes, details: bool
+) -> Optional[Union[str, TextGenerationStreamOutput]]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+
+    # Either an error is being returned
+    if json_payload.get("error") is not None:
+        raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+    # Or parse token payload
+    output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)
+    return output.token.text if not details else output
+
+
+def _stream_chat_completion_response_from_text_generation(
+    text_generation_output: Iterable[TextGenerationStreamOutput],
+) -> Iterable[ChatCompletionStreamOutput]:
+    """Used in `InferenceClient.chat_completion`."""
+    created = int(time.time())
+    for item in text_generation_output:
+        yield _format_chat_completion_stream_output_from_text_generation(item, created)
+
+
+async def _async_stream_chat_completion_response_from_text_generation(
+    text_generation_output: AsyncIterable[TextGenerationStreamOutput],
+) -> AsyncIterable[ChatCompletionStreamOutput]:
+    """Used in `AsyncInferenceClient.chat_completion`."""
+    created = int(time.time())
+    async for item in text_generation_output:
+        yield _format_chat_completion_stream_output_from_text_generation(item, created)
+
+
+def _format_chat_completion_stream_output_from_text_generation(
+    item: TextGenerationStreamOutput, created: int
+) -> ChatCompletionStreamOutput:
+    if item.details is None:
+        # new token generated => return delta
+        return ChatCompletionStreamOutput(
+            choices=[
+                ChatCompletionStreamOutputChoice(
+                    delta=ChatCompletionStreamOutputDelta(
+                        role="assistant",
+                        content=item.token.text,
+                    ),
+                    finish_reason=None,
+                    index=0,
+                )
+            ],
+            created=created,
+        )
+    else:
+        # generation is completed => return finish reason
+        return ChatCompletionStreamOutput(
+            choices=[
+                ChatCompletionStreamOutputChoice(
+                    delta=ChatCompletionStreamOutputDelta(),
+                    finish_reason=item.details.finish_reason,
+                    index=0,
+                )
+            ],
+            created=created,
+        )
+
+
+def _stream_chat_completion_response_from_bytes(
+    bytes_lines: Iterable[bytes],
+) -> Iterable[ChatCompletionStreamOutput]:
+    """Used in `InferenceClient.chat_completion` if model is served with TGI."""
+    for item in bytes_lines:
+        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
+        if output is not None:
+            yield output
+
+
+async def _async_stream_chat_completion_response_from_bytes(
+    bytes_lines: AsyncIterable[bytes],
+) -> AsyncIterable[ChatCompletionStreamOutput]:
+    """Used in `AsyncInferenceClient.chat_completion`."""
+    async for item in bytes_lines:
+        output = _format_chat_completion_stream_output_from_text_generation_from_bytes(item)
+        if output is not None:
+            yield output
+
+
+def _format_chat_completion_stream_output_from_text_generation_from_bytes(
+    byte_payload: bytes,
+) -> Optional[ChatCompletionStreamOutput]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+    return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)
+
+
+async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
+    async for byte_payload in response.content:
+        yield byte_payload
+    await client.close()
+
+
+# "TGI servers" are servers running with the `text-generation-inference` backend.
+# This backend is the go-to solution to run large language models at scale. However,
+# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`
+# solution is still in use.
+#
+# Both approaches have very similar APIs, but not exactly the same. What we do first in
+# the `text_generation` method is to assume the model is served via TGI. If we realize
+# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fall back to the
+# default API with a warning message. We remember for each model if it's a TGI server
+# or not using the `_NON_TGI_SERVERS` global variable.
+#
+# In addition, TGI servers have a built-in API route for chat-completion, which is not
+# available on the default API. We use this route to provide a more consistent behavior
+# when available.
+#
+# For more details, see https://github.com/huggingface/text-generation-inference and
+# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.
+
+_NON_TGI_SERVERS: Set[Optional[str]] = set()
+
+
+def _set_as_non_tgi(model: Optional[str]) -> None:
+    _NON_TGI_SERVERS.add(model)
+
+
+def _is_tgi_server(model: Optional[str]) -> bool:
+    return model not in _NON_TGI_SERVERS
+
+
+_NON_CHAT_COMPLETION_SERVER: Set[str] = set()
+
+
+def _set_as_non_chat_completion_server(model: str) -> None:
+    _NON_CHAT_COMPLETION_SERVER.add(model)
+
+
+def _is_chat_completion_server(model: str) -> bool:
+    return model not in _NON_CHAT_COMPLETION_SERVER
+
+
+# TEXT GENERATION ERRORS
+# ----------------------
+# Text-generation errors are parsed separately to handle, as much as possible, the
+# errors returned by the text-generation-inference project
+# (https://github.com/huggingface/text-generation-inference).
+# ----------------------
+
+
+def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
+    """
+    Try to parse a text-generation-inference error message and raise a more specific exception when possible;
+    re-raise the original HTTPError otherwise.
+
+    Args:
+        http_error (`HTTPError`):
+            The HTTPError that has been raised.
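+
+    Raises:
+        [`TextGenerationError`]: A subclass matching the parsed `error_type`
+            (generation, incomplete_generation, overloaded or validation).
+        `HTTPError`: The original error, re-raised when no payload or `error_type` can be parsed.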
+ """ + # Try to parse a Text Generation Inference error + + try: + # Hacky way to retrieve payload in case of aiohttp error + payload = getattr(http_error, "response_error_payload", None) or http_error.response.json() + error = payload.get("error") + error_type = payload.get("error_type") + except Exception: # no payload + raise http_error + + # If error_type => more information than `hf_raise_for_status` + if error_type is not None: + exception = _parse_text_generation_error(error, error_type) + raise exception from http_error + + # Otherwise, fallback to default error + raise http_error + + +def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError: + if error_type == "generation": + return GenerationError(error) # type: ignore + if error_type == "incomplete_generation": + return IncompleteGenerationError(error) # type: ignore + if error_type == "overloaded": + return OverloadedError(error) # type: ignore + if error_type == "validation": + return ValidationError(error) # type: ignore + return UnknownError(error) # type: ignore diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa23ffc654f406c10d041fd3098e2fe6fe6b8003 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e0ed4e8ec778f72f5cf05b1c4e9b3670363503 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..4f473ed106c7d168784ae8e96db18f46237d065e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py @@ -0,0 +1,31 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any + +from .base import BaseInferenceType + + +@dataclass +class AudioToAudioInput(BaseInferenceType): + """Inputs for Audio to Audio inference""" + + inputs: Any + """The input audio data""" + + +@dataclass +class AudioToAudioOutputElement(BaseInferenceType): + """Outputs of inference for the Audio To Audio task + A generated audio file with its label. 
+ """ + + blob: Any + """The generated audio file.""" + content_type: str + """The content type of audio file.""" + label: str + """The label of the audio file.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..c68be4bde00a98fbce46a2ef6a93bb549d4d920b --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py @@ -0,0 +1,85 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Optional, Union + +from .base import BaseInferenceType + + +@dataclass +class DocumentQuestionAnsweringInputData(BaseInferenceType): + """One (document, question) pair to answer""" + + image: Any + """The image on which the question is asked""" + question: str + """A question to ask of the document""" + + +@dataclass +class DocumentQuestionAnsweringParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Document Question Answering + """ + + doc_stride: Optional[int] = None + """If the words in the document are too long to fit with the question for the model, it will + be split in several chunks with some overlap. This argument controls the size of that + overlap. + """ + handle_impossible_answer: Optional[bool] = None + """Whether to accept impossible as an answer""" + lang: Optional[str] = None + """Language to use while running OCR. Defaults to english.""" + max_answer_len: Optional[int] = None + """The maximum length of predicted answers (e.g., only answers with a shorter length are + considered). + """ + max_question_len: Optional[int] = None + """The maximum length of the question after tokenization. It will be truncated if needed.""" + max_seq_len: Optional[int] = None + """The maximum length of the total sentence (context + question) in tokens of each chunk + passed to the model. The context will be split in several chunks (using doc_stride as + overlap) if needed. + """ + top_k: Optional[int] = None + """The number of answers to return (will be chosen by order of likelihood). Can return less + than top_k answers if there are not enough options available within the context. + """ + word_boxes: Optional[List[Union[List[float], str]]] = None + """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will + skip the OCR step and use the provided bounding boxes instead. + """ + + +@dataclass +class DocumentQuestionAnsweringInput(BaseInferenceType): + """Inputs for Document Question Answering inference""" + + inputs: DocumentQuestionAnsweringInputData + """One (document, question) pair to answer""" + parameters: Optional[DocumentQuestionAnsweringParameters] = None + """Additional inference parameters""" + + +@dataclass +class DocumentQuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Document Question Answering task""" + + answer: str + """The answer to the question.""" + end: int + """The end word index of the answer (in the OCR’d version of the input or provided word + boxes). 
+ """ + score: float + """The probability associated to the answer.""" + start: int + """The start word index of the answer (in the OCR’d version of the input or provided word + boxes). + """ + words: List[int] + """The index of each word/box pair that is in the answer""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..e1fddf96fbb7c76c8ffee0c170c6554c8b4e2bf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py @@ -0,0 +1,50 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class FillMaskParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Fill Mask + """ + + targets: Optional[List[str]] = None + """When passed, the model will limit the scores to the passed targets instead of looking up + in the whole vocabulary. If the provided targets are not in the model vocab, they will be + tokenized and the first resulting token will be used (with a warning, and that might be + slower). + """ + top_k: Optional[int] = None + """When passed, overrides the number of predictions to return.""" + + +@dataclass +class FillMaskInput(BaseInferenceType): + """Inputs for Fill Mask inference""" + + inputs: str + """The text with masked tokens""" + parameters: Optional[FillMaskParameters] = None + """Additional inference parameters""" + + +@dataclass +class FillMaskOutputElement(BaseInferenceType): + """Outputs of inference for the Fill Mask task""" + + score: float + """The corresponding probability""" + sequence: str + """The corresponding input with the mask token prediction.""" + token: int + """The predicted token id (to replace the masked one).""" + token_str: Any + fill_mask_output_token_str: Optional[str] = None + """The predicted token (to replace the masked one).""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..67dd7c28b3cddd21d495ada70b7689a098accfd6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py @@ -0,0 +1,52 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from dataclasses import dataclass +from typing import Any, Literal, Optional + +from .base import BaseInferenceType + + +ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"] + + +@dataclass +class ImageSegmentationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Image Segmentation + """ + + mask_threshold: Optional[float] = None + """Threshold to use when turning the predicted masks into binary values.""" + overlap_mask_area_threshold: Optional[float] = None + """Mask overlap threshold to eliminate small, disconnected segments.""" + subtask: Optional["ImageSegmentationSubtask"] = None + """Segmentation task to be performed, depending on model capabilities.""" + threshold: Optional[float] = None + """Probability threshold to filter out predicted masks.""" + + +@dataclass +class ImageSegmentationInput(BaseInferenceType): + """Inputs for Image Segmentation inference""" + + inputs: Any + """The input image data""" + parameters: Optional[ImageSegmentationParameters] = None + """Additional inference parameters""" + + +@dataclass +class ImageSegmentationOutputElement(BaseInferenceType): + """Outputs of inference for the Image Segmentation task + A predicted mask / segment + """ + + label: str + """The label of the predicted segment""" + mask: Any + """The corresponding mask as a black-and-white image""" + score: Optional[float] = None + """The score or confidence degree the model has""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py new file mode 100644 index 0000000000000000000000000000000000000000..8c208ede6f7f2fb73b5dd059fe71bc8d2c4ca140 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py @@ -0,0 +1,55 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class ImageToImageTargetSize(BaseInferenceType): + """The size in pixels of the output image""" + + height: int + width: int + + +@dataclass +class ImageToImageParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Image To Image + """ + + guidance_scale: Optional[float] = None + """For diffusion models. A higher guidance scale value encourages the model to generate + images closely linked to the text prompt at the expense of lower image quality. + """ + negative_prompt: Optional[List[str]] = None + """One or several prompts to guide what NOT to include in image generation.""" + num_inference_steps: Optional[int] = None + """For diffusion models. The number of denoising steps. More denoising steps usually lead to + a higher quality image at the expense of slower inference.
+ """ + target_size: Optional[ImageToImageTargetSize] = None + """The size in pixel of the output image""" + + +@dataclass +class ImageToImageInput(BaseInferenceType): + """Inputs for Image To Image inference""" + + inputs: Any + """The input image data""" + parameters: Optional[ImageToImageParameters] = None + """Additional inference parameters""" + + +@dataclass +class ImageToImageOutput(BaseInferenceType): + """Outputs of inference for the Image To Image task""" + + image: Any + """The output image""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebb9a9bc667bdb0d2afd7bb8e482fc18f6634d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py @@ -0,0 +1,105 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Literal, Optional, Union + +from .base import BaseInferenceType + + +EarlyStoppingEnum = Literal["never"] + + +@dataclass +class ImageToTextGenerationParameters(BaseInferenceType): + """Parametrization of the text generation process + Ad-hoc parametrization of the text generation process + """ + + do_sample: Optional[bool] = None + """Whether to use sampling instead of greedy decoding when generating new tokens.""" + early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None + """Controls the stopping condition for beam-based methods.""" + epsilon_cutoff: Optional[float] = None + """If set to float strictly between 0 and 1, only tokens with a conditional probability + greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + """ + eta_cutoff: Optional[float] = None + """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + float strictly between 0 and 1, a token is only considered if it is greater than either + eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + for more details. + """ + max_length: Optional[int] = None + """The maximum length (in tokens) of the generated text, including the input.""" + max_new_tokens: Optional[int] = None + """The maximum number of tokens to generate. Takes precedence over maxLength.""" + min_length: Optional[int] = None + """The minimum length (in tokens) of the generated text, including the input.""" + min_new_tokens: Optional[int] = None + """The minimum number of tokens to generate. Takes precedence over maxLength.""" + num_beam_groups: Optional[int] = None + """Number of groups to divide num_beams into in order to ensure diversity among different + groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. 
+ """ + num_beams: Optional[int] = None + """Number of beams to use for beam search.""" + penalty_alpha: Optional[float] = None + """The value balances the model confidence and the degeneration penalty in contrastive + search decoding. + """ + temperature: Optional[float] = None + """The value used to modulate the next token probabilities.""" + top_k: Optional[int] = None + """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" + top_p: Optional[float] = None + """If set to float < 1, only the smallest set of most probable tokens with probabilities + that add up to top_p or higher are kept for generation. + """ + typical_p: Optional[float] = None + """Local typicality measures how similar the conditional probability of predicting a target + token next is to the expected conditional probability of predicting a random token next, + given the partial text already generated. If set to float < 1, the smallest set of the + most locally typical tokens with probabilities that add up to typical_p or higher are + kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + """ + use_cache: Optional[bool] = None + """Whether the model should use the past last key/values attentions to speed up decoding""" + + +@dataclass +class ImageToTextParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Image To Text + """ + + generate: Optional[ImageToTextGenerationParameters] = None + """Parametrization of the text generation process""" + max_new_tokens: Optional[int] = None + """The amount of maximum tokens to generate.""" + + +@dataclass +class ImageToTextInput(BaseInferenceType): + """Inputs for Image To Text inference""" + + inputs: Any + """The input image data""" + parameters: Optional[ImageToTextParameters] = None + """Additional inference parameters""" + + +@dataclass +class ImageToTextOutput(BaseInferenceType): + """Outputs of inference for the Image To Text task""" + + generated_text: Any + image_to_text_output_generated_text: Optional[str] = None + """The generated text.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..42b03a841b793fd4cb301bf51695bd35054a6af2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py @@ -0,0 +1,55 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from dataclasses import dataclass +from typing import Any, Optional + +from .base import BaseInferenceType + + +@dataclass +class ObjectDetectionParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Object Detection + """ + + threshold: Optional[float] = None + """The probability necessary to make a prediction.""" + + +@dataclass +class ObjectDetectionInput(BaseInferenceType): + """Inputs for Object Detection inference""" + + inputs: Any + """The input image data""" + parameters: Optional[ObjectDetectionParameters] = None + """Additional inference parameters""" + + +@dataclass +class ObjectDetectionBoundingBox(BaseInferenceType): + """The predicted bounding box. Coordinates are relative to the top left corner of the input + image. + """ + + xmax: int + xmin: int + ymax: int + ymin: int + + +@dataclass +class ObjectDetectionOutputElement(BaseInferenceType): + """Outputs of inference for the Object Detection task""" + + box: ObjectDetectionBoundingBox + """The predicted bounding box. Coordinates are relative to the top left corner of the input + image. + """ + label: str + """The predicted label for the bounding box""" + score: float + """The associated score / probability""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py new file mode 100644 index 0000000000000000000000000000000000000000..a6a00e53264bd9a53a24d2ee7b12f428c068a117 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py @@ -0,0 +1,46 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
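On the output side, a short sketch of consuming the object-detection elements defined above; `detections` stands in for whatever an inference call returned (hypothetical data, coordinates in pixels relative to the top-left corner):

```py
# Sketch: reading Object Detection results shaped like the dataclasses above.
from huggingface_hub.inference._generated.types.object_detection import (
    ObjectDetectionBoundingBox,
    ObjectDetectionOutputElement,
)

detections = [
    ObjectDetectionOutputElement(
        box=ObjectDetectionBoundingBox(xmin=12, ymin=30, xmax=130, ymax=210),
        label="cat",
        score=0.97,
    ),
]
for det in detections:
    width = det.box.xmax - det.box.xmin
    height = det.box.ymax - det.box.ymin
    print(f"{det.label}: score={det.score:.2f}, box={width}x{height}px")
```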
+from dataclasses import dataclass +from typing import Any, Dict, Literal, Optional + +from .base import BaseInferenceType + + +SummarizationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] + + +@dataclass +class SummarizationGenerationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text2text Generation + """ + + clean_up_tokenization_spaces: Optional[bool] = None + """Whether to clean up the potential extra spaces in the text output.""" + generate_parameters: Optional[Dict[str, Any]] = None + """Additional parametrization of the text generation algorithm""" + truncation: Optional["SummarizationGenerationTruncationStrategy"] = None + """The truncation strategy to use""" + + +@dataclass +class SummarizationInput(BaseInferenceType): + """Inputs for Summarization inference + Inputs for Text2text Generation inference + """ + + inputs: str + """The input text data""" + parameters: Optional[SummarizationGenerationParameters] = None + """Additional inference parameters""" + + +@dataclass +class SummarizationOutput(BaseInferenceType): + """Outputs of inference for the Summarization task""" + + summary_text: str + """The summarized text.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb9fff641fd4ed2d8e797e59ae7b5f21f94c838 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py @@ -0,0 +1,45 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class TableQuestionAnsweringInputData(BaseInferenceType): + """One (table, question) pair to answer""" + + question: str + """The question to be answered about the table""" + table: Dict[str, List[str]] + """The table to serve as context for the questions""" + + +@dataclass +class TableQuestionAnsweringInput(BaseInferenceType): + """Inputs for Table Question Answering inference""" + + inputs: TableQuestionAnsweringInputData + """One (table, question) pair to answer""" + parameters: Optional[Dict[str, Any]] = None + """Additional inference parameters""" + + +@dataclass +class TableQuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Table Question Answering task""" + + answer: str + """The answer of the question given the table. If there is an aggregator, the answer will be + preceded by `AGGREGATOR >`. 
+ """ + cells: List[str] + """List of strings made up of the answer cell values.""" + coordinates: List[List[int]] + """Coordinates of the cells of the answers.""" + aggregator: Optional[str] = None + """If the model has an aggregator, this returns the aggregator.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..2866985071741b26d69c1afc8902738cff10ef03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py @@ -0,0 +1,161 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import List, Literal, Optional + +from .base import BaseInferenceType + + +@dataclass +class TextGenerationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text Generation + """ + + best_of: Optional[int] = None + """The number of sampling queries to run. Only the best one (in terms of total logprob) will + be returned. + """ + decoder_input_details: Optional[bool] = None + """Whether or not to output decoder input details""" + details: Optional[bool] = None + """Whether or not to output details""" + do_sample: Optional[bool] = None + """Whether to use logits sampling instead of greedy decoding when generating new tokens.""" + max_new_tokens: Optional[int] = None + """The maximum number of tokens to generate.""" + repetition_penalty: Optional[float] = None + """The parameter for repetition penalty. A value of 1.0 means no penalty. See [this + paper](https://hf.co/papers/1909.05858) for more details. + """ + return_full_text: Optional[bool] = None + """Whether to prepend the prompt to the generated text.""" + seed: Optional[int] = None + """The random sampling seed.""" + stop_sequences: Optional[List[str]] = None + """Stop generating tokens if a member of `stop_sequences` is generated.""" + temperature: Optional[float] = None + """The value used to modulate the logits distribution.""" + top_k: Optional[int] = None + """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" + top_p: Optional[float] = None + """If set to < 1, only the smallest set of most probable tokens with probabilities that add + up to `top_p` or higher are kept for generation. + """ + truncate: Optional[int] = None + """Truncate input tokens to the given size.""" + typical_p: Optional[float] = None + """Typical Decoding mass. 
See [Typical Decoding for Natural Language + Generation](https://hf.co/papers/2202.00666) for more information + """ + watermark: Optional[bool] = None + """Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)""" + + +@dataclass +class TextGenerationInput(BaseInferenceType): + """Inputs for Text Generation inference""" + + inputs: str + """The text to initialize generation with""" + parameters: Optional[TextGenerationParameters] = None + """Additional inference parameters""" + stream: Optional[bool] = None + """Whether to stream output tokens""" + + +TextGenerationFinishReason = Literal["length", "eos_token", "stop_sequence"] + + +@dataclass +class TextGenerationPrefillToken(BaseInferenceType): + id: int + logprob: float + text: str + """The text associated with that token""" + + +@dataclass +class TextGenerationOutputToken(BaseInferenceType): + """Generated token.""" + + id: int + special: bool + """Whether or not that token is a special one""" + text: str + """The text associated with that token""" + logprob: Optional[float] = None + + +@dataclass +class TextGenerationOutputSequenceDetails(BaseInferenceType): + finish_reason: "TextGenerationFinishReason" + generated_text: str + """The generated text""" + generated_tokens: int + """The number of generated tokens""" + prefill: List[TextGenerationPrefillToken] + tokens: List[TextGenerationOutputToken] + """The generated tokens and associated details""" + seed: Optional[int] = None + """The random seed used for generation""" + top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None + """Most likely tokens""" + + +@dataclass +class TextGenerationOutputDetails(BaseInferenceType): + """When enabled, details about the generation""" + + finish_reason: "TextGenerationFinishReason" + """The reason why the generation was stopped.""" + generated_tokens: int + """The number of generated tokens""" + prefill: List[TextGenerationPrefillToken] + tokens: List[TextGenerationOutputToken] + """The generated tokens and associated details""" + best_of_sequences: Optional[List[TextGenerationOutputSequenceDetails]] = None + """Details about additional sequences when best_of is provided""" + seed: Optional[int] = None + """The random seed used for generation""" + top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None + """Most likely tokens""" + + +@dataclass +class TextGenerationOutput(BaseInferenceType): + """Outputs for Text Generation inference""" + + generated_text: str + """The generated text""" + details: Optional[TextGenerationOutputDetails] = None + """When enabled, details about the generation""" + + +@dataclass +class TextGenerationStreamDetails(BaseInferenceType): + """Generation details. Only available when the generation is finished.""" + + finish_reason: "TextGenerationFinishReason" + """The reason why the generation was stopped.""" + generated_tokens: int + """The number of generated tokens""" + seed: int + """The random seed used for generation""" + + +@dataclass +class TextGenerationStreamOutput(BaseInferenceType): + """Text Generation Stream Output""" + + token: TextGenerationOutputToken + """Generated token.""" + details: Optional[TextGenerationStreamDetails] = None + """Generation details. Only available when the generation is finished.""" + generated_text: Optional[str] = None + """The complete generated text. Only available when the generation is finished.""" + index: Optional[int] = None + """The token index within the stream. 
Optional to support older clients that omit it.""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py new file mode 100644 index 0000000000000000000000000000000000000000..40e53ab016d3a6f2098d26eadab9cf51805c31b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py @@ -0,0 +1,57 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, List, Optional + +from .base import BaseInferenceType + + +@dataclass +class TextToImageTargetSize(BaseInferenceType): + """The size in pixels of the output image""" + + height: int + width: int + + +@dataclass +class TextToImageParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Text To Image + """ + + guidance_scale: Optional[float] = None + """For diffusion models. A higher guidance scale value encourages the model to generate + images closely linked to the text prompt at the expense of lower image quality. + """ + negative_prompt: Optional[List[str]] = None + """One or several prompts to guide what NOT to include in image generation.""" + num_inference_steps: Optional[int] = None + """For diffusion models. The number of denoising steps. More denoising steps usually lead to + a higher quality image at the expense of slower inference. + """ + scheduler: Optional[str] = None + """For diffusion models. Override the scheduler with a compatible one""" + target_size: Optional[TextToImageTargetSize] = None + """The size in pixels of the output image""" + + +@dataclass +class TextToImageInput(BaseInferenceType): + """Inputs for Text To Image inference""" + + inputs: str + """The input text data (sometimes called "prompt")""" + parameters: Optional[TextToImageParameters] = None + """Additional inference parameters""" + + +@dataclass +class TextToImageOutput(BaseInferenceType): + """Outputs of inference for the Text To Image task""" + + image: Any + """The generated image""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/token_classification.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/token_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..2d60ea27eedbfe28096435c84e4002c0d9a64bc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/token_classification.py @@ -0,0 +1,53 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
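Putting the text-generation types together, a minimal sketch of a non-streaming request; values are illustrative, and `details=True` is what populates `TextGenerationOutputDetails` in the response:

```py
# Sketch: a request built from the text-generation dataclasses above.
from huggingface_hub.inference._generated.types.text_generation import (
    TextGenerationInput,
    TextGenerationParameters,
)

request = TextGenerationInput(
    inputs="Once upon a time",
    parameters=TextGenerationParameters(
        max_new_tokens=64,
        temperature=0.8,
        top_p=0.95,
        stop_sequences=["\n\n"],
        details=True,  # ask the server for per-token details
        seed=42,       # reproducible sampling
    ),
    stream=False,
)
```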
+from dataclasses import dataclass +from typing import Any, List, Literal, Optional + +from .base import BaseInferenceType + + +TokenClassificationAggregationStrategy = Literal["none", "simple", "first", "average", "max"] + + +@dataclass +class TokenClassificationParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Token Classification + """ + + aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None + """The strategy used to fuse tokens based on model predictions""" + ignore_labels: Optional[List[str]] = None + """A list of labels to ignore""" + stride: Optional[int] = None + """The number of overlapping tokens between chunks when splitting the input text.""" + + +@dataclass +class TokenClassificationInput(BaseInferenceType): + """Inputs for Token Classification inference""" + + inputs: str + """The input text data""" + parameters: Optional[TokenClassificationParameters] = None + """Additional inference parameters""" + + +@dataclass +class TokenClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Token Classification task""" + + label: Any + score: float + """The associated score / probability""" + end: Optional[int] = None + """The character position in the input where this group ends.""" + entity_group: Optional[str] = None + """The predicted label for that group of tokens""" + start: Optional[int] = None + """The character position in the input where this group begins.""" + word: Optional[str] = None + """The corresponding text""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab7c14d8ab032c2e9bf24c835520182cb1b5e5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py @@ -0,0 +1,53 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from dataclasses import dataclass +from typing import Any, Optional + +from .base import BaseInferenceType + + +@dataclass +class VisualQuestionAnsweringInputData(BaseInferenceType): + """One (image, question) pair to answer""" + + image: Any + """The image.""" + question: Any + """The question to answer based on the image.""" + + +@dataclass +class VisualQuestionAnsweringParameters(BaseInferenceType): + """Additional inference parameters + Additional inference parameters for Visual Question Answering + """ + + top_k: Optional[int] = None + """The number of answers to return (will be chosen by order of likelihood). Note that we + return less than topk answers if there are not enough options available within the + context. 
+ """ + + +@dataclass +class VisualQuestionAnsweringInput(BaseInferenceType): + """Inputs for Visual Question Answering inference""" + + inputs: VisualQuestionAnsweringInputData + """One (image, question) pair to answer""" + parameters: Optional[VisualQuestionAnsweringParameters] = None + """Additional inference parameters""" + + +@dataclass +class VisualQuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Visual Question Answering task""" + + label: Any + score: float + """The associated score / probability""" + answer: Optional[str] = None + """The answer to the question""" diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py new file mode 100644 index 0000000000000000000000000000000000000000..10c349c24d676c75989b08bcf28181a497056a2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_templating.py @@ -0,0 +1,105 @@ +from functools import lru_cache +from typing import Callable, Dict, List, Optional, Union + +from ..utils import HfHubHTTPError, RepositoryNotFoundError, is_minijinja_available + + +class TemplateError(Exception): + """Any error raised while trying to fetch or render a chat template.""" + + +def _import_minijinja(): + if not is_minijinja_available(): + raise ImportError("Cannot render template. Please install minijinja using `pip install minijinja`.") + import minijinja # noqa: F401 + + return minijinja + + +def render_chat_prompt( + *, + model_id: str, + messages: List[Dict[str, str]], + token: Union[str, bool, None] = None, + add_generation_prompt: bool = True, + **kwargs, +) -> str: + """Render a chat prompt using a model's chat template. + + Args: + model_id (`str`): + The model id. + messages (`List[Dict[str, str]]`): + The list of messages to render. + token (`str` or `bool`, *optional*): + Hugging Face token. Will default to the locally saved token if not provided. + + Returns: + `str`: The rendered chat prompt. + + Raises: + `TemplateError`: If there's any issue while fetching, compiling or rendering the chat template. + """ + minijinja = _import_minijinja() + template = _fetch_and_compile_template(model_id=model_id, token=token) + + try: + return template(messages=messages, add_generation_prompt=add_generation_prompt, **kwargs) + except minijinja.TemplateError as e: + raise TemplateError(f"Error while trying to render chat prompt for model '{model_id}': {e}") from e + + +@lru_cache # TODO: lru_cache for raised exceptions +def _fetch_and_compile_template(*, model_id: str, token: Union[str, None]) -> Callable: + """Fetch and compile a model's chat template. + + Method is cached to avoid fetching the same model's config multiple times. + + Args: + model_id (`str`): + The model id. + token (`str` or `bool`, *optional*): + Hugging Face token. Will default to the locally saved token if not provided. + + Returns: + `Callable`: A callable that takes a list of messages and returns the rendered chat prompt. + """ + from huggingface_hub.hf_api import HfApi + + minijinja = _import_minijinja() + + # 1. fetch config from API + try: + config = HfApi(token=token).model_info(model_id).config + except RepositoryNotFoundError as e: + raise TemplateError(f"Cannot render chat template: model '{model_id}' not found.") from e + except HfHubHTTPError as e: + raise TemplateError(f"Error while trying to fetch chat template for model '{model_id}': {e}") from e + + # 2. 
check config validity + if config is None: + raise TemplateError(f"Config not found for model '{model_id}'.") + tokenizer_config = config.get("tokenizer_config") + if tokenizer_config is None: + raise TemplateError(f"Tokenizer config not found for model '{model_id}'.") + if tokenizer_config.get("chat_template") is None: + raise TemplateError(f"Chat template not found in tokenizer_config for model '{model_id}'.") + chat_template = tokenizer_config["chat_template"] + if not isinstance(chat_template, str): + raise TemplateError(f"Chat template must be a string, not '{type(chat_template)}' (model: {model_id}).") + + special_tokens: Dict[str, Optional[str]] = {} + for key, value in tokenizer_config.items(): + if "token" in key: + if isinstance(value, str): + special_tokens[key] = value + elif isinstance(value, dict) and value.get("__type") == "AddedToken": + special_tokens[key] = value.get("content") + + # 3. compile template and return + env = minijinja.Environment() + try: + env.add_template("chat_template", chat_template) + except minijinja.TemplateError as e: + raise TemplateError(f"Error while trying to compile chat template for model '{model_id}': {e}") from e + return lambda **kwargs: env.render_template("chat_template", **kwargs, **special_tokens) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/inference/_types.py b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_types.py new file mode 100644 index 0000000000000000000000000000000000000000..70c2137210c436c29398921178b6e8f45b6a4182 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/inference/_types.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, TypedDict + + +# Legacy types +# Types are now generated from the JSON schema spec in @huggingface/tasks. +# See ./src/huggingface_hub/inference/_generated/types + + +class ConversationalOutputConversation(TypedDict): + """Dictionary containing the "conversation" part of a [`~InferenceClient.conversational`] task. + + Args: + generated_responses (`List[str]`): + A list of the responses from the model. + past_user_inputs (`List[str]`): + A list of the inputs from the user. Must be the same length as `generated_responses`. + """ + + generated_responses: List[str] + past_user_inputs: List[str] + + +class ConversationalOutput(TypedDict): + """Dictionary containing the output of a [`~InferenceClient.conversational`] task. + + Args: + generated_text (`str`): + The last response from the model. + conversation (`ConversationalOutputConversation`): + The past conversation. + warnings (`List[str]`): + A list of warnings associated with the process. 
+ """ + + conversation: ConversationalOutputConversation + generated_text: str + warnings: List[str] diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97c236ecec3233c863facf2207a0038cb173bcea --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +# ruff: noqa: F401 + +from . import tqdm as _tqdm # _tqdm is the module +from ._cache_assets import cached_assets_path +from ._cache_manager import ( + CachedFileInfo, + CachedRepoInfo, + CachedRevisionInfo, + CacheNotFound, + CorruptedCacheException, + DeleteCacheStrategy, + HFCacheInfo, + scan_cache_dir, +) +from ._chunk_utils import chunk_iterable +from ._datetime import parse_datetime +from ._errors import ( + BadRequestError, + DisabledRepoError, + EntryNotFoundError, + FileMetadataError, + GatedRepoError, + HfHubHTTPError, + LocalEntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + hf_raise_for_status, +) +from ._experimental import experimental +from ._fixes import SoftTemporaryDirectory, WeakFileLock, yaml_dump +from ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential +from ._headers import LocalTokenNotFoundError, build_hf_headers, get_token_to_send +from ._hf_folder import HfFolder +from ._http import ( + OfflineModeIsEnabled, + configure_http_backend, + fix_hf_endpoint_in_url, + get_session, + http_backoff, + reset_sessions, +) +from ._pagination import paginate +from ._paths import IGNORE_GIT_FOLDER_PATTERNS, filter_repo_objects +from ._runtime import ( + dump_environment_info, + get_aiohttp_version, + get_fastai_version, + get_fastcore_version, + get_gradio_version, + get_graphviz_version, + get_hf_hub_version, + get_hf_transfer_version, + get_jinja_version, + get_minijinja_version, + get_numpy_version, + get_pillow_version, + get_pydantic_version, + get_pydot_version, + get_python_version, + get_tensorboard_version, + get_tf_version, + get_torch_version, + is_aiohttp_available, + is_fastai_available, + is_fastcore_available, + is_google_colab, + is_gradio_available, + is_graphviz_available, + is_hf_transfer_available, + is_jinja_available, + is_minijinja_available, + is_notebook, + is_numpy_available, + is_package_available, + is_pillow_available, + is_pydantic_available, + is_pydot_available, + is_safetensors_available, + is_tensorboard_available, + is_tf_available, + is_torch_available, +) +from ._safetensors import ( + NotASafetensorsRepoError, + SafetensorsFileMetadata, + SafetensorsParsingError, + SafetensorsRepoMetadata, + TensorInfo, +) +from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess +from ._telemetry import send_telemetry +from ._token import get_token +from ._typing 
import is_jsonable +from ._validators import ( + HFValidationError, + smoothly_deprecate_use_auth_token, + validate_hf_hub_args, + validate_repo_id, +) +from .tqdm import ( + are_progress_bars_disabled, + disable_progress_bars, + enable_progress_bars, + tqdm, + tqdm_stream_file, +) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10f7b05e3703407ac6a0ce3d48d75ab666b8cb34 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ae7c977659f3b75da0e1a79fe9c22518f7c1dd9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdffe4923b46987a43e2f4b56af7d7c6e8266628 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3460cd8e40dc4a7f3f75c2fdc1bae865eb02d0c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d435df9b0bb0c67c0bcb5ef65711e9aef367f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py @@ -0,0 +1,135 @@ +# coding=utf-8 +# Copyright 2019-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path +from typing import Union + +from ..constants import HF_ASSETS_CACHE + + +def cached_assets_path( + library_name: str, + namespace: str = "default", + subfolder: str = "default", + *, + assets_dir: Union[str, Path, None] = None, +): + """Return a folder path to cache arbitrary files. + + `huggingface_hub` provides a canonical folder path to store assets. 
This is the + recommended way to integrate cache in a downstream library as it will benefit from + the built-in tools to scan and delete the cache properly. + + The distinction is made between files cached from the Hub and assets. Files from the + Hub are cached in a git-aware manner and entirely managed by `huggingface_hub`. See + [related documentation](https://huggingface.co/docs/huggingface_hub/how-to-cache). + All other files that a downstream library caches are considered to be "assets" + (files downloaded from external sources, extracted from a .tar archive, preprocessed + for training,...). + + Once the folder path is generated, it is guaranteed to exist and to be a directory. + The path is based on 3 levels of depth: the library name, a namespace and a + subfolder. Those 3 levels grant flexibility while allowing `huggingface_hub` to + expect folders when scanning/deleting parts of the assets cache. Within a library, + it is expected that all namespaces share the same subset of subfolder names but this + is not a mandatory rule. The downstream library then has full control over which file + structure to adopt within its cache. Namespace and subfolder are optional (would + default to a `"default/"` subfolder) but library name is mandatory as we want every + downstream library to manage its own cache. + + Expected tree: + ```text + assets/ + └── datasets/ + │ ├── SQuAD/ + │ │ ├── downloaded/ + │ │ ├── extracted/ + │ │ └── processed/ + │ ├── Helsinki-NLP--tatoeba_mt/ + │ ├── downloaded/ + │ ├── extracted/ + │ └── processed/ + └── transformers/ + ├── default/ + │ ├── something/ + ├── bert-base-cased/ + │ ├── default/ + │ └── training/ + hub/ + └── models--julien-c--EsperBERTo-small/ + ├── blobs/ + │ ├── (...) + │ ├── (...) + ├── refs/ + │ └── (...) + └── snapshots/ + ├── 2439f60ef33a0d46d85da5001d52aeda5b00ce9f/ + │ ├── (...) + └── bbc77c8132af1cc5cf678da3f1ddf2de43606d48/ + └── (...) + ``` + + + Args: + library_name (`str`): + Name of the library that will manage the cache folder. Example: `"dataset"`. + namespace (`str`, *optional*, defaults to "default"): + Namespace to which the data belongs. Example: `"SQuAD"`. + subfolder (`str`, *optional*, defaults to "default"): + Subfolder in which the data will be stored. Example: `extracted`. + assets_dir (`str`, `Path`, *optional*): + Path to the folder where assets are cached. This must not be the same folder + where Hub files are cached. Defaults to `HF_HOME / "assets"` if not provided. + Can also be set with `HF_ASSETS_CACHE` environment variable. + + Returns: + Path to the cache folder (`Path`).
+ + Example: + ```py + >>> from huggingface_hub import cached_assets_path + + >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="download") + PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/download') + + >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="extracted") + PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/extracted') + + >>> cached_assets_path(library_name="datasets", namespace="Helsinki-NLP/tatoeba_mt") + PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/Helsinki-NLP--tatoeba_mt/default') + + >>> cached_assets_path(library_name="datasets", assets_dir="/tmp/tmp123456") + PosixPath('/tmp/tmp123456/datasets/default/default') + ``` + """ + # Resolve assets_dir + if assets_dir is None: + assets_dir = HF_ASSETS_CACHE + assets_dir = Path(assets_dir).expanduser().resolve() + + # Avoid names that could create path issues + for part in (" ", "/", "\\"): + library_name = library_name.replace(part, "--") + namespace = namespace.replace(part, "--") + subfolder = subfolder.replace(part, "--") + + # Path to subfolder is created + path = assets_dir / library_name / namespace / subfolder + try: + path.mkdir(exist_ok=True, parents=True) + except (FileExistsError, NotADirectoryError): + raise ValueError(f"Corrupted assets folder: cannot create directory because of an existing file ({path}).") + + # Return + return path diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..78ce8fdadc58ac3112324be25c02cbb56637f86e --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py @@ -0,0 +1,813 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to manage the HF cache directory.""" + +import os +import shutil +import time +from collections import defaultdict +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, FrozenSet, List, Literal, Optional, Set, Union + +from ..constants import HF_HUB_CACHE +from . import logging + + +logger = logging.get_logger(__name__) + +REPO_TYPE_T = Literal["model", "dataset", "space"] + +# List of OS-created helper files that need to be ignored +FILES_TO_IGNORE = [".DS_Store"] + + +class CacheNotFound(Exception): + """Exception thrown when the Huggingface cache is not found.""" + + cache_dir: Union[str, Path] + + def __init__(self, msg: str, cache_dir: Union[str, Path], *args, **kwargs): + super().__init__(msg, *args, **kwargs) + self.cache_dir = cache_dir + + +class CorruptedCacheException(Exception): + """Exception for any unexpected structure in the Huggingface cache-system.""" + + +@dataclass(frozen=True) +class CachedFileInfo: + """Frozen data structure holding information about a single cached file. 
+ + Args: + file_name (`str`): + Name of the file. Example: `config.json`. + file_path (`Path`): + Path of the file in the `snapshots` directory. The file path is a symlink + referring to a blob in the `blobs` folder. + blob_path (`Path`): + Path of the blob file. This is equivalent to `file_path.resolve()`. + size_on_disk (`int`): + Size of the blob file in bytes. + blob_last_accessed (`float`): + Timestamp of the last time the blob file has been accessed (from any + revision). + blob_last_modified (`float`): + Timestamp of the last time the blob file has been modified/created. + + + + `blob_last_accessed` and `blob_last_modified` reliability can depend on the OS you + are using. See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result) + for more details. + + + """ + + file_name: str + file_path: Path + blob_path: Path + size_on_disk: int + + blob_last_accessed: float + blob_last_modified: float + + @property + def blob_last_accessed_str(self) -> str: + """ + (property) Timestamp of the last time the blob file has been accessed (from any + revision), returned as a human-readable string. + + Example: "2 weeks ago". + """ + return _format_timesince(self.blob_last_accessed) + + @property + def blob_last_modified_str(self) -> str: + """ + (property) Timestamp of the last time the blob file has been modified, returned + as a human-readable string. + + Example: "2 weeks ago". + """ + return _format_timesince(self.blob_last_modified) + + @property + def size_on_disk_str(self) -> str: + """ + (property) Size of the blob file as a human-readable string. + + Example: "42.2K". + """ + return _format_size(self.size_on_disk) + + +@dataclass(frozen=True) +class CachedRevisionInfo: + """Frozen data structure holding information about a revision. + + A revision corresponds to a folder in the `snapshots` folder and is populated with + the exact tree structure of the repo on the Hub but contains only symlinks. A + revision can either be referenced by 1 or more `refs` or be "detached" (no refs). + + Args: + commit_hash (`str`): + Hash of the revision (unique). + Example: `"9338f7b671827df886678df2bdd7cc7b4f36dffd"`. + snapshot_path (`Path`): + Path to the revision directory in the `snapshots` folder. It contains the + exact tree structure of the repo on the Hub. + files: (`FrozenSet[CachedFileInfo]`): + Set of [`~CachedFileInfo`] describing all files contained in the snapshot. + refs (`FrozenSet[str]`): + Set of `refs` pointing to this revision. If the revision has no `refs`, it + is considered detached. + Example: `{"main", "2.4.0"}` or `{"refs/pr/1"}`. + size_on_disk (`int`): + Sum of the blob file sizes that are symlink-ed by the revision. + last_modified (`float`): + Timestamp of the last time the revision has been created/modified. + + + + `last_accessed` cannot be determined correctly on a single revision as blob files + are shared across revisions. + + + + + + `size_on_disk` is not necessarily the sum of all file sizes because of possible + duplicated files. Besides, only blobs are taken into account, not the (negligible) + size of folders and symlinks. + + + """ + + commit_hash: str + snapshot_path: Path + size_on_disk: int + files: FrozenSet[CachedFileInfo] + refs: FrozenSet[str] + + last_modified: float + + @property + def last_modified_str(self) -> str: + """ + (property) Timestamp of the last time the revision has been modified, returned + as a human-readable string. + + Example: "2 weeks ago".
+ """ + return _format_timesince(self.last_modified) + + @property + def size_on_disk_str(self) -> str: + """ + (property) Sum of the blob file sizes as a human-readable string. + + Example: "42.2K". + """ + return _format_size(self.size_on_disk) + + @property + def nb_files(self) -> int: + """ + (property) Total number of files in the revision. + """ + return len(self.files) + + +@dataclass(frozen=True) +class CachedRepoInfo: + """Frozen data structure holding information about a cached repository. + + Args: + repo_id (`str`): + Repo id of the repo on the Hub. Example: `"google/fleurs"`. + repo_type (`Literal["dataset", "model", "space"]`): + Type of the cached repo. + repo_path (`Path`): + Local path to the cached repo. + size_on_disk (`int`): + Sum of the blob file sizes in the cached repo. + nb_files (`int`): + Total number of blob files in the cached repo. + revisions (`FrozenSet[CachedRevisionInfo]`): + Set of [`~CachedRevisionInfo`] describing all revisions cached in the repo. + last_accessed (`float`): + Timestamp of the last time a blob file of the repo has been accessed. + last_modified (`float`): + Timestamp of the last time a blob file of the repo has been modified/created. + + + + `size_on_disk` is not necessarily the sum of all revisions sizes because of + duplicated files. Besides, only blobs are taken into account, not the (negligible) + size of folders and symlinks. + + + + + + `last_accessed` and `last_modified` reliability can depend on the OS you are using. + See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result) + for more details. + + + """ + + repo_id: str + repo_type: REPO_TYPE_T + repo_path: Path + size_on_disk: int + nb_files: int + revisions: FrozenSet[CachedRevisionInfo] + + last_accessed: float + last_modified: float + + @property + def last_accessed_str(self) -> str: + """ + (property) Last time a blob file of the repo has been accessed, returned as a + human-readable string. + + Example: "2 weeks ago". + """ + return _format_timesince(self.last_accessed) + + @property + def last_modified_str(self) -> str: + """ + (property) Last time a blob file of the repo has been modified, returned as a + human-readable string. + + Example: "2 weeks ago". + """ + return _format_timesince(self.last_modified) + + @property + def size_on_disk_str(self) -> str: + """ + (property) Sum of the blob file sizes as a human-readable string. + + Example: "42.2K". + """ + return _format_size(self.size_on_disk) + + @property + def refs(self) -> Dict[str, CachedRevisionInfo]: + """ + (property) Mapping between `refs` and revision data structures. + """ + return {ref: revision for revision in self.revisions for ref in revision.refs} + + +@dataclass(frozen=True) +class DeleteCacheStrategy: + """Frozen data structure holding the strategy to delete cached revisions. + + This object is not meant to be instantiated programmatically but to be returned by + [`~utils.HFCacheInfo.delete_revisions`]. See documentation for usage example. + + Args: + expected_freed_size (`float`): + Expected freed size once strategy is executed. + blobs (`FrozenSet[Path]`): + Set of blob file paths to be deleted. + refs (`FrozenSet[Path]`): + Set of reference file paths to be deleted. + repos (`FrozenSet[Path]`): + Set of entire repo paths to be deleted. + snapshots (`FrozenSet[Path]`): + Set of snapshots to be deleted (directory of symlinks). 
+ """ + + expected_freed_size: int + blobs: FrozenSet[Path] + refs: FrozenSet[Path] + repos: FrozenSet[Path] + snapshots: FrozenSet[Path] + + @property + def expected_freed_size_str(self) -> str: + """ + (property) Expected size that will be freed as a human-readable string. + + Example: "42.2K". + """ + return _format_size(self.expected_freed_size) + + def execute(self) -> None: + """Execute the defined strategy. + + + + If this method is interrupted, the cache might get corrupted. Deletion order is + implemented so that references and symlinks are deleted before the actual blob + files. + + + + + + This method is irreversible. If executed, cached files are erased and must be + downloaded again. + + + """ + # Deletion order matters. Blobs are deleted in last so that the user can't end + # up in a state where a `ref`` refers to a missing snapshot or a snapshot + # symlink refers to a deleted blob. + + # Delete entire repos + for path in self.repos: + _try_delete_path(path, path_type="repo") + + # Delete snapshot directories + for path in self.snapshots: + _try_delete_path(path, path_type="snapshot") + + # Delete refs files + for path in self.refs: + _try_delete_path(path, path_type="ref") + + # Delete blob files + for path in self.blobs: + _try_delete_path(path, path_type="blob") + + logger.info(f"Cache deletion done. Saved {self.expected_freed_size_str}.") + + +@dataclass(frozen=True) +class HFCacheInfo: + """Frozen data structure holding information about the entire cache-system. + + This data structure is returned by [`scan_cache_dir`] and is immutable. + + Args: + size_on_disk (`int`): + Sum of all valid repo sizes in the cache-system. + repos (`FrozenSet[CachedRepoInfo]`): + Set of [`~CachedRepoInfo`] describing all valid cached repos found on the + cache-system while scanning. + warnings (`List[CorruptedCacheException]`): + List of [`~CorruptedCacheException`] that occurred while scanning the cache. + Those exceptions are captured so that the scan can continue. Corrupted repos + are skipped from the scan. + + + + Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However if + some cached repos are corrupted, their sizes are not taken into account. + + + """ + + size_on_disk: int + repos: FrozenSet[CachedRepoInfo] + warnings: List[CorruptedCacheException] + + @property + def size_on_disk_str(self) -> str: + """ + (property) Sum of all valid repo sizes in the cache-system as a human-readable + string. + + Example: "42.2K". + """ + return _format_size(self.size_on_disk) + + def delete_revisions(self, *revisions: str) -> DeleteCacheStrategy: + """Prepare the strategy to delete one or more revisions cached locally. + + Input revisions can be any revision hash. If a revision hash is not found in the + local cache, a warning is thrown but no error is raised. Revisions can be from + different cached repos since hashes are unique across repos, + + Examples: + ```py + >>> from huggingface_hub import scan_cache_dir + >>> cache_info = scan_cache_dir() + >>> delete_strategy = cache_info.delete_revisions( + ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa" + ... ) + >>> print(f"Will free {delete_strategy.expected_freed_size_str}.") + Will free 7.9K. + >>> delete_strategy.execute() + Cache deletion done. Saved 7.9K. + ``` + + ```py + >>> from huggingface_hub import scan_cache_dir + >>> scan_cache_dir().delete_revisions( + ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa", + ... "e2983b237dccf3ab4937c97fa717319a9ca1a96d", + ... "6c0e6080953db56375760c0471a8c5f2929baf11", + ... 
).execute() + Cache deletion done. Saved 8.6G. + ``` + + + + `delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to + be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but + allows having a dry run before actually executing the deletion. + + + """ + hashes_to_delete: Set[str] = set(revisions) + + repos_with_revisions: Dict[CachedRepoInfo, Set[CachedRevisionInfo]] = defaultdict(set) + + for repo in self.repos: + for revision in repo.revisions: + if revision.commit_hash in hashes_to_delete: + repos_with_revisions[repo].add(revision) + hashes_to_delete.remove(revision.commit_hash) + + if len(hashes_to_delete) > 0: + logger.warning(f"Revision(s) not found - cannot delete them: {', '.join(hashes_to_delete)}") + + delete_strategy_blobs: Set[Path] = set() + delete_strategy_refs: Set[Path] = set() + delete_strategy_repos: Set[Path] = set() + delete_strategy_snapshots: Set[Path] = set() + delete_strategy_expected_freed_size = 0 + + for affected_repo, revisions_to_delete in repos_with_revisions.items(): + other_revisions = affected_repo.revisions - revisions_to_delete + + # If no other revisions, it means all revisions are deleted + # -> delete the entire cached repo + if len(other_revisions) == 0: + delete_strategy_repos.add(affected_repo.repo_path) + delete_strategy_expected_freed_size += affected_repo.size_on_disk + continue + + # Some revisions of the repo will be deleted but not all. We need to filter + # which blob files will not be linked anymore. + for revision_to_delete in revisions_to_delete: + # Snapshot dir + delete_strategy_snapshots.add(revision_to_delete.snapshot_path) + + # Refs dir + for ref in revision_to_delete.refs: + delete_strategy_refs.add(affected_repo.repo_path / "refs" / ref) + + # Blobs dir + for file in revision_to_delete.files: + if file.blob_path not in delete_strategy_blobs: + is_file_alone = True + for revision in other_revisions: + for rev_file in revision.files: + if file.blob_path == rev_file.blob_path: + is_file_alone = False + break + if not is_file_alone: + break + + # Blob file not referenced by remaining revisions -> delete + if is_file_alone: + delete_strategy_blobs.add(file.blob_path) + delete_strategy_expected_freed_size += file.size_on_disk + + # Return the strategy instead of executing it. + return DeleteCacheStrategy( + blobs=frozenset(delete_strategy_blobs), + refs=frozenset(delete_strategy_refs), + repos=frozenset(delete_strategy_repos), + snapshots=frozenset(delete_strategy_snapshots), + expected_freed_size=delete_strategy_expected_freed_size, + ) + + +def scan_cache_dir(cache_dir: Optional[Union[str, Path]] = None) -> HFCacheInfo: + """Scan the entire HF cache-system and return a [`~HFCacheInfo`] structure. + + Use `scan_cache_dir` in order to programmatically scan your cache-system. The cache + will be scanned repo by repo. If a repo is corrupted, a [`~CorruptedCacheException`] + will be thrown internally but captured and returned in the [`~HFCacheInfo`] + structure. Only valid repos get a proper report. 
+
+    ```py
+    >>> from huggingface_hub import scan_cache_dir
+
+    >>> hf_cache_info = scan_cache_dir()
+    HFCacheInfo(
+        size_on_disk=3398085269,
+        repos=frozenset({
+            CachedRepoInfo(
+                repo_id='t5-small',
+                repo_type='model',
+                repo_path=PosixPath(...),
+                size_on_disk=970726914,
+                nb_files=11,
+                revisions=frozenset({
+                    CachedRevisionInfo(
+                        commit_hash='d78aea13fa7ecd06c29e3e46195d6341255065d5',
+                        size_on_disk=970726339,
+                        snapshot_path=PosixPath(...),
+                        files=frozenset({
+                            CachedFileInfo(
+                                file_name='config.json',
+                                size_on_disk=1197,
+                                file_path=PosixPath(...),
+                                blob_path=PosixPath(...),
+                            ),
+                            CachedFileInfo(...),
+                            ...
+                        }),
+                    ),
+                    CachedRevisionInfo(...),
+                    ...
+                }),
+            ),
+            CachedRepoInfo(...),
+            ...
+        }),
+        warnings=[
+            CorruptedCacheException("Snapshots dir doesn't exist in cached repo: ..."),
+            CorruptedCacheException(...),
+            ...
+        ],
+    )
+    ```
+
+    You can also print a detailed report directly from the `huggingface-cli` using:
+    ```text
+    > huggingface-cli scan-cache
+    REPO ID                     REPO TYPE SIZE ON DISK NB FILES REFS                LOCAL PATH
+    --------------------------- --------- ------------ -------- ------------------- -------------------------------------------------------------------------
+    glue                        dataset         116.3K       15 1.17.0, main, 2.4.0 /Users/lucain/.cache/huggingface/hub/datasets--glue
+    google/fleurs               dataset          64.9M        6 main, refs/pr/1     /Users/lucain/.cache/huggingface/hub/datasets--google--fleurs
+    Jean-Baptiste/camembert-ner model           441.0M        7 main                /Users/lucain/.cache/huggingface/hub/models--Jean-Baptiste--camembert-ner
+    bert-base-cased             model             1.9G       13 main                /Users/lucain/.cache/huggingface/hub/models--bert-base-cased
+    t5-base                     model            10.1K        3 main                /Users/lucain/.cache/huggingface/hub/models--t5-base
+    t5-small                    model           970.7M       11 refs/pr/1, main     /Users/lucain/.cache/huggingface/hub/models--t5-small
+
+    Done in 0.0s. Scanned 6 repo(s) for a total of 3.4G.
+    Got 1 warning(s) while scanning. Use -vvv to print details.
+    ```
+
+    Args:
+        cache_dir (`str` or `Path`, `optional`):
+            Cache directory to scan. Defaults to the default HF cache directory.
+
+    Raises:
+
+        `CacheNotFound`
+          If the cache directory does not exist.
+
+        [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+          If the cache directory is a file instead of a directory.
+
+    Returns: a [`~HFCacheInfo`] object.
+    """
+    if cache_dir is None:
+        cache_dir = HF_HUB_CACHE
+
+    cache_dir = Path(cache_dir).expanduser().resolve()
+    if not cache_dir.exists():
+        raise CacheNotFound(
+            f"Cache directory not found: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable.",
+            cache_dir=cache_dir,
+        )
+
+    if cache_dir.is_file():
+        raise ValueError(
+            f"Scan cache expects a directory but found a file: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable."
+        )
+
+    repos: Set[CachedRepoInfo] = set()
+    warnings: List[CorruptedCacheException] = []
+    for repo_path in cache_dir.iterdir():
+        if repo_path.name == ".locks":  # skip './.locks/' folder
+            continue
+        try:
+            repos.add(_scan_cached_repo(repo_path))
+        except CorruptedCacheException as e:
+            warnings.append(e)
+
+    return HFCacheInfo(
+        repos=frozenset(repos),
+        size_on_disk=sum(repo.size_on_disk for repo in repos),
+        warnings=warnings,
+    )
+
+
+def _scan_cached_repo(repo_path: Path) -> CachedRepoInfo:
+    """Scan a single cache repo and return information about it.
+
+    Any unexpected behavior will raise a [`~CorruptedCacheException`].
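+
+    Expected layout of a cached repo (a sketch derived from the scanning logic below):
+    ```text
+    models--<org>--<repo_name>
+    ├── blobs/              # actual files, shared across revisions
+    ├── refs/               # one text file per ref, containing a commit hash
+    └── snapshots/
+        └── <commit_hash>/  # repo tree made of symlinks into blobs/
+    ```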
+    """
+    if not repo_path.is_dir():
+        raise CorruptedCacheException(f"Repo path is not a directory: {repo_path}")
+
+    if "--" not in repo_path.name:
+        raise CorruptedCacheException(f"Repo path is not a valid HuggingFace cache directory: {repo_path}")
+
+    repo_type, repo_id = repo_path.name.split("--", maxsplit=1)
+    repo_type = repo_type[:-1]  # "models" -> "model"
+    repo_id = repo_id.replace("--", "/")  # "google--fleurs" -> "google/fleurs"
+
+    if repo_type not in {"dataset", "model", "space"}:
+        raise CorruptedCacheException(
+            f"Repo type must be `dataset`, `model` or `space`, found `{repo_type}` ({repo_path})."
+        )
+
+    blob_stats: Dict[Path, os.stat_result] = {}  # Key is blob_path, value is blob stats
+
+    snapshots_path = repo_path / "snapshots"
+    refs_path = repo_path / "refs"
+
+    if not snapshots_path.exists() or not snapshots_path.is_dir():
+        raise CorruptedCacheException(f"Snapshots dir doesn't exist in cached repo: {snapshots_path}")
+
+    # Scan over `refs` directory
+
+    # key is revision hash, value is set of refs
+    refs_by_hash: Dict[str, Set[str]] = defaultdict(set)
+    if refs_path.exists():
+        # Example of `refs` directory
+        # ── refs
+        #    ├── main
+        #    └── refs
+        #        └── pr
+        #            └── 1
+        if refs_path.is_file():
+            raise CorruptedCacheException(f"Refs directory cannot be a file: {refs_path}")
+
+        for ref_path in refs_path.glob("**/*"):
+            # glob("**/*") iterates over all files and directories -> skip directories
+            if ref_path.is_dir():
+                continue
+
+            ref_name = str(ref_path.relative_to(refs_path))
+            with ref_path.open() as f:
+                commit_hash = f.read()
+
+            refs_by_hash[commit_hash].add(ref_name)
+
+    # Scan snapshots directory
+    cached_revisions: Set[CachedRevisionInfo] = set()
+    for revision_path in snapshots_path.iterdir():
+        # Ignore OS-created helper files
+        if revision_path.name in FILES_TO_IGNORE:
+            continue
+        if revision_path.is_file():
+            raise CorruptedCacheException(f"Snapshots folder corrupted. Found a file: {revision_path}")
+
+        cached_files = set()
+        for file_path in revision_path.glob("**/*"):
+            # glob("**/*") iterates over all files and directories -> skip directories
+            if file_path.is_dir():
+                continue
+
+            blob_path = Path(file_path).resolve()
+            if not blob_path.exists():
+                raise CorruptedCacheException(f"Blob missing (broken symlink): {blob_path}")
+
+            if blob_path not in blob_stats:
+                blob_stats[blob_path] = blob_path.stat()
+
+            cached_files.add(
+                CachedFileInfo(
+                    file_name=file_path.name,
+                    file_path=file_path,
+                    size_on_disk=blob_stats[blob_path].st_size,
+                    blob_path=blob_path,
+                    blob_last_accessed=blob_stats[blob_path].st_atime,
+                    blob_last_modified=blob_stats[blob_path].st_mtime,
+                )
+            )
+
+        # Last modified is either the last modified blob file or the revision folder
+        # itself if it is empty
+        if len(cached_files) > 0:
+            revision_last_modified = max(blob_stats[file.blob_path].st_mtime for file in cached_files)
+        else:
+            revision_last_modified = revision_path.stat().st_mtime
+
+        cached_revisions.add(
+            CachedRevisionInfo(
+                commit_hash=revision_path.name,
+                files=frozenset(cached_files),
+                refs=frozenset(refs_by_hash.pop(revision_path.name, set())),
+                size_on_disk=sum(
+                    blob_stats[blob_path].st_size for blob_path in set(file.blob_path for file in cached_files)
+                ),
+                snapshot_path=revision_path,
+                last_modified=revision_last_modified,
+            )
+        )
+
+    # Check that all refs refer to an existing revision
+    if len(refs_by_hash) > 0:
+        raise CorruptedCacheException(
+            f"Reference(s) refer to missing commit hashes: {dict(refs_by_hash)} ({repo_path})."
+        )
+
+    # Last modified is either the last modified blob file or the repo folder itself if
+    # no blob files have been found. Same for last accessed.
+    if len(blob_stats) > 0:
+        repo_last_accessed = max(stat.st_atime for stat in blob_stats.values())
+        repo_last_modified = max(stat.st_mtime for stat in blob_stats.values())
+    else:
+        repo_stats = repo_path.stat()
+        repo_last_accessed = repo_stats.st_atime
+        repo_last_modified = repo_stats.st_mtime
+
+    # Build and return frozen structure
+    return CachedRepoInfo(
+        nb_files=len(blob_stats),
+        repo_id=repo_id,
+        repo_path=repo_path,
+        repo_type=repo_type,  # type: ignore
+        revisions=frozenset(cached_revisions),
+        size_on_disk=sum(stat.st_size for stat in blob_stats.values()),
+        last_accessed=repo_last_accessed,
+        last_modified=repo_last_modified,
+    )
+
+
+def _format_size(num: int) -> str:
+    """Format size in bytes into a human-readable string.
+
+    Taken from https://stackoverflow.com/a/1094933
+    """
+    num_f = float(num)
+    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
+        if abs(num_f) < 1000.0:
+            return f"{num_f:3.1f}{unit}"
+        num_f /= 1000.0
+    return f"{num_f:.1f}Y"
+
+
+_TIMESINCE_CHUNKS = (
+    # Label, divider, max value
+    ("second", 1, 60),
+    ("minute", 60, 60),
+    ("hour", 60 * 60, 24),
+    ("day", 60 * 60 * 24, 6),
+    ("week", 60 * 60 * 24 * 7, 6),
+    ("month", 60 * 60 * 24 * 30, 11),
+    ("year", 60 * 60 * 24 * 365, None),
+)
+
+
+def _format_timesince(ts: float) -> str:
+    """Format timestamp in seconds into a human-readable string, relative to now.
+
+    Vaguely inspired by Django's `timesince` formatter.
+    """
+    delta = time.time() - ts
+    if delta < 20:
+        return "a few seconds ago"
+    for label, divider, max_value in _TIMESINCE_CHUNKS:  # noqa: B007
+        value = round(delta / divider)
+        if max_value is not None and value <= max_value:
+            break
+    return f"{value} {label}{'s' if value > 1 else ''} ago"
+
+
+def _try_delete_path(path: Path, path_type: str) -> None:
+    """Try to delete a local file or folder.
+
+    If the path does not exist, the error is logged as a warning and then ignored.
+
+    Args:
+        path (`Path`):
+            Path to delete. Can be a file or a folder.
+        path_type (`str`):
+            What path are we deleting? Only for logging purposes. Example: "snapshot".
+    """
+    logger.info(f"Delete {path_type}: {path}")
+    try:
+        if path.is_file():
+            os.remove(path)
+        else:
+            shutil.rmtree(path)
+    except FileNotFoundError:
+        logger.warning(f"Couldn't delete {path_type}: file not found ({path})", exc_info=True)
+    except PermissionError:
+        logger.warning(f"Couldn't delete {path_type}: permission denied ({path})", exc_info=True)
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cb8d6e418c76accd1ecd61158b4bdd265e12f71
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py
@@ -0,0 +1,136 @@
+import warnings
+from functools import wraps
+from inspect import Parameter, signature
+from typing import Iterable, Optional
+
+
+def _deprecate_positional_args(*, version: str):
+    """Decorator for methods that issues warnings for positional arguments.
+
+    Using the keyword-only argument syntax of PEP 3102, arguments after the `*` will
+    issue a warning when passed as a positional argument.
+
+    Args:
+        version (`str`):
+            The version when positional arguments will result in an error.
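+
+    Example (a hypothetical `greet` function, for illustration only):
+    ```py
+    >>> @_deprecate_positional_args(version="1.0")
+    ... def greet(name, *, greeting="Hello"):
+    ...     return f"{greeting}, {name}!"
+
+    >>> greet("World")  # fine: no keyword-only argument passed positionally
+    'Hello, World!'
+    >>> greet("World", "Hi")  # issues a FutureWarning, then still works
+    'Hi, World!'
+    ```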
+ """ + + def _inner_deprecate_positional_args(f): + sig = signature(f) + kwonly_args = [] + all_args = [] + for name, param in sig.parameters.items(): + if param.kind == Parameter.POSITIONAL_OR_KEYWORD: + all_args.append(name) + elif param.kind == Parameter.KEYWORD_ONLY: + kwonly_args.append(name) + + @wraps(f) + def inner_f(*args, **kwargs): + extra_args = len(args) - len(all_args) + if extra_args <= 0: + return f(*args, **kwargs) + # extra_args > 0 + args_msg = [ + f"{name}='{arg}'" if isinstance(arg, str) else f"{name}={arg}" + for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:]) + ] + args_msg = ", ".join(args_msg) + warnings.warn( + f"Deprecated positional argument(s) used in '{f.__name__}': pass" + f" {args_msg} as keyword args. From version {version} passing these" + " as positional arguments will result in an error,", + FutureWarning, + ) + kwargs.update(zip(sig.parameters, args)) + return f(**kwargs) + + return inner_f + + return _inner_deprecate_positional_args + + +def _deprecate_arguments( + *, + version: str, + deprecated_args: Iterable[str], + custom_message: Optional[str] = None, +): + """Decorator to issue warnings when using deprecated arguments. + + TODO: could be useful to be able to set a custom error message. + + Args: + version (`str`): + The version when deprecated arguments will result in error. + deprecated_args (`List[str]`): + List of the arguments to be deprecated. + custom_message (`str`, *optional*): + Warning message that is raised. If not passed, a default warning message + will be created. + """ + + def _inner_deprecate_positional_args(f): + sig = signature(f) + + @wraps(f) + def inner_f(*args, **kwargs): + # Check for used deprecated arguments + used_deprecated_args = [] + for _, parameter in zip(args, sig.parameters.values()): + if parameter.name in deprecated_args: + used_deprecated_args.append(parameter.name) + for kwarg_name, kwarg_value in kwargs.items(): + if ( + # If argument is deprecated but still used + kwarg_name in deprecated_args + # And then the value is not the default value + and kwarg_value != sig.parameters[kwarg_name].default + ): + used_deprecated_args.append(kwarg_name) + + # Warn and proceed + if len(used_deprecated_args) > 0: + message = ( + f"Deprecated argument(s) used in '{f.__name__}':" + f" {', '.join(used_deprecated_args)}. Will not be supported from" + f" version '{version}'." + ) + if custom_message is not None: + message += "\n\n" + custom_message + warnings.warn(message, FutureWarning) + return f(*args, **kwargs) + + return inner_f + + return _inner_deprecate_positional_args + + +def _deprecate_method(*, version: str, message: Optional[str] = None): + """Decorator to issue warnings when using a deprecated method. + + Args: + version (`str`): + The version when deprecated arguments will result in error. + message (`str`, *optional*): + Warning message that is raised. If not passed, a default warning message + will be created. + """ + + def _inner_deprecate_method(f): + name = f.__name__ + if name == "__init__": + name = f.__qualname__.split(".")[0] # class name instead of method name + + @wraps(f) + def inner_f(*args, **kwargs): + warning_message = ( + f"'{name}' (from '{f.__module__}') is deprecated and will be removed from version '{version}'." 
+            )
+            if message is not None:
+                warning_message += " " + message
+            warnings.warn(warning_message, FutureWarning)
+            return f(*args, **kwargs)
+
+        return inner_f
+
+    return _inner_deprecate_method
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..70e97f0c24101243bce003f0ca55d852c02a48cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py
@@ -0,0 +1,397 @@
+import re
+from typing import Optional
+
+from requests import HTTPError, Response
+
+from ._fixes import JSONDecodeError
+
+
+REPO_API_REGEX = re.compile(
+    r"""
+        # staging or production endpoint
+        ^https://[^/]+
+        (
+            # on /api/repo_type/repo_id
+            /api/(models|datasets|spaces)/(.+)
+            |
+            # or /repo_id/resolve/revision/...
+            /(.+)/resolve/(.+)
+        )
+    """,
+    flags=re.VERBOSE,
+)
+
+
+class FileMetadataError(OSError):
+    """Error triggered when the metadata of a file on the Hub cannot be retrieved (missing ETag or commit_hash).
+
+    Inherits from `OSError` for backward compatibility.
+    """
+
+
+class HfHubHTTPError(HTTPError):
+    """
+    HTTPError to inherit from for any custom HTTP Error raised in HF Hub.
+
+    Any HTTPError is converted at least into a `HfHubHTTPError`. If some information is
+    sent back by the server, it will be added to the error message.
+
+    Added details:
+    - Request id from the "X-Request-Id" header, if it exists.
+    - Server error message from the "X-Error-Message" header.
+    - Server error message if we can find one in the response body.
+
+    Example:
+    ```py
+    import requests
+    from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError
+
+    response = get_session().post(...)
+    try:
+        hf_raise_for_status(response)
+    except HfHubHTTPError as e:
+        print(str(e))  # formatted message
+        e.request_id, e.server_message  # details returned by server
+
+        # Complete the error message with additional information once it's raised
+        e.append_to_message("\n`create_commit` expects the repository to exist.")
+        raise
+    ```
+    """
+
+    request_id: Optional[str] = None
+    server_message: Optional[str] = None
+
+    def __init__(self, message: str, response: Optional[Response] = None):
+        # Parse server information if any.
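+        # (request id from the "X-Request-Id" header, error message from the
+        # "X-Error-Message" header and/or from the response body)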
+ if response is not None: + self.request_id = response.headers.get("X-Request-Id") + try: + server_data = response.json() + except JSONDecodeError: + server_data = {} + + # Retrieve server error message from multiple sources + server_message_from_headers = response.headers.get("X-Error-Message") + server_message_from_body = server_data.get("error") + server_multiple_messages_from_body = "\n".join( + error["message"] for error in server_data.get("errors", []) if "message" in error + ) + + # Concatenate error messages + _server_message = "" + if server_message_from_headers is not None: # from headers + _server_message += server_message_from_headers + "\n" + if server_message_from_body is not None: # from body "error" + if isinstance(server_message_from_body, list): + server_message_from_body = "\n".join(server_message_from_body) + if server_message_from_body not in _server_message: + _server_message += server_message_from_body + "\n" + if server_multiple_messages_from_body is not None: # from body "errors" + if server_multiple_messages_from_body not in _server_message: + _server_message += server_multiple_messages_from_body + "\n" + _server_message = _server_message.strip() + + # Set message to `HfHubHTTPError` (if any) + if _server_message != "": + self.server_message = _server_message + + super().__init__( + _format_error_message( + message, + request_id=self.request_id, + server_message=self.server_message, + ), + response=response, # type: ignore + request=response.request if response is not None else None, # type: ignore + ) + + def append_to_message(self, additional_message: str) -> None: + """Append additional information to the `HfHubHTTPError` initial message.""" + self.args = (self.args[0] + additional_message,) + self.args[1:] + + +class RepositoryNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with an invalid repository name, or + with a private repo name the user does not have access to. + + Example: + + ```py + >>> from huggingface_hub import model_info + >>> model_info("") + (...) + huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: PvMw_VjBMjVdMz53WKIzP) + + Repository Not Found for url: https://huggingface.co/api/models/%3Cnon_existent_repository%3E. + Please make sure you specified the correct `repo_id` and `repo_type`. + If the repo is private, make sure you are authenticated. + Invalid username or password. + ``` + """ + + +class GatedRepoError(RepositoryNotFoundError): + """ + Raised when trying to access a gated repository for which the user is not on the + authorized list. + + Note: derives from `RepositoryNotFoundError` to ensure backward compatibility. + + Example: + + ```py + >>> from huggingface_hub import model_info + >>> model_info("") + (...) + huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: ViT1Bf7O_026LGSQuVqfa) + + Cannot access gated repo for url https://huggingface.co/api/models/ardent-figment/gated-model. + Access to model ardent-figment/gated-model is restricted and you are not in the authorized list. + Visit https://huggingface.co/ardent-figment/gated-model to ask for access. + ``` + """ + + +class DisabledRepoError(HfHubHTTPError): + """ + Raised when trying to access a repository that has been disabled by its author. + + Example: + + ```py + >>> from huggingface_hub import dataset_info + >>> dataset_info("laion/laion-art") + (...) + huggingface_hub.utils._errors.DisabledRepoError: 403 Client Error. 
(Request ID: Root=1-659fc3fa-3031673e0f92c71a2260dbe2;bc6f4dfb-b30a-4862-af0a-5cfe827610d8) + + Cannot access repository for url https://huggingface.co/api/datasets/laion/laion-art. + Access to this resource is disabled. + ``` + """ + + +class RevisionNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with a valid repository but an invalid + revision. + + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', 'config.json', revision='') + (...) + huggingface_hub.utils._errors.RevisionNotFoundError: 404 Client Error. (Request ID: Mwhe_c3Kt650GcdKEFomX) + + Revision Not Found for url: https://huggingface.co/bert-base-cased/resolve/%3Cnon-existent-revision%3E/config.json. + ``` + """ + + +class EntryNotFoundError(HfHubHTTPError): + """ + Raised when trying to access a hf.co URL with a valid repository and revision + but an invalid filename. + + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', '') + (...) + huggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. (Request ID: 53pNl6M0MxsnG5Sw8JA6x) + + Entry Not Found for url: https://huggingface.co/bert-base-cased/resolve/main/%3Cnon-existent-file%3E. + ``` + """ + + +class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError): + """ + Raised when trying to access a file or snapshot that is not on the disk when network is + disabled or unavailable (connection issue). The entry may exist on the Hub. + + Note: `ValueError` type is to ensure backward compatibility. + Note: `LocalEntryNotFoundError` derives from `HTTPError` because of `EntryNotFoundError` + even when it is not a network issue. + + Example: + + ```py + >>> from huggingface_hub import hf_hub_download + >>> hf_hub_download('bert-base-cased', '', local_files_only=True) + (...) + huggingface_hub.utils._errors.LocalEntryNotFoundError: Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False. + ``` + """ + + def __init__(self, message: str): + super().__init__(message, response=None) + + +class BadRequestError(HfHubHTTPError, ValueError): + """ + Raised by `hf_raise_for_status` when the server returns a HTTP 400 error. + + Example: + + ```py + >>> resp = requests.post("hf.co/api/check", ...) + >>> hf_raise_for_status(resp, endpoint_name="check") + huggingface_hub.utils._errors.BadRequestError: Bad request for check endpoint: {details} (Request ID: XXX) + ``` + """ + + +def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None) -> None: + """ + Internal version of `response.raise_for_status()` that will refine a + potential HTTPError. Raised exception will be an instance of `HfHubHTTPError`. + + This helper is meant to be the unique method to raise_for_status when making a call + to the Hugging Face Hub. + + Example: + ```py + import requests + from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError + + response = get_session().post(...) + try: + hf_raise_for_status(response) + except HfHubHTTPError as e: + print(str(e)) # formatted message + e.request_id, e.server_message # details returned by server + + # Complete the error message with additional information once it's raised + e.append_to_message("\n`create_commit` expects the repository to exist.") + raise + ``` + + Args: + response (`Response`): + Response from the server. 
+    endpoint_name (`str`, *optional*):
+        Name of the endpoint that has been called. If provided, the error message
+        will be more complete.
+
+    Raises the following errors when the request has failed:
+
+        - [`~utils.RepositoryNotFoundError`]
+            If the repository to download from cannot be found. This may be because it
+            doesn't exist, because `repo_type` is not set correctly, or because the repo
+            is `private` and you do not have access.
+        - [`~utils.GatedRepoError`]
+            If the repository exists but is gated and the user is not on the authorized
+            list.
+        - [`~utils.RevisionNotFoundError`]
+            If the repository exists but the revision couldn't be found.
+        - [`~utils.EntryNotFoundError`]
+            If the repository exists but the entry (e.g. the requested file) couldn't be
+            found.
+        - [`~utils.BadRequestError`]
+            If the request failed with an HTTP 400 BadRequest error.
+        - [`~utils.HfHubHTTPError`]
+            If the request failed for a reason not listed above.
+    """
+    try:
+        response.raise_for_status()
+    except HTTPError as e:
+        error_code = response.headers.get("X-Error-Code")
+        error_message = response.headers.get("X-Error-Message")
+
+        if error_code == "RevisionNotFound":
+            message = f"{response.status_code} Client Error." + "\n\n" + f"Revision Not Found for url: {response.url}."
+            raise RevisionNotFoundError(message, response) from e
+
+        elif error_code == "EntryNotFound":
+            message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}."
+            raise EntryNotFoundError(message, response) from e
+
+        elif error_code == "GatedRepo":
+            message = (
+                f"{response.status_code} Client Error." + "\n\n" + f"Cannot access gated repo for url {response.url}."
+            )
+            raise GatedRepoError(message, response) from e
+
+        elif error_message == "Access to this resource is disabled.":
+            message = (
+                f"{response.status_code} Client Error."
+                + "\n\n"
+                + f"Cannot access repository for url {response.url}."
+                + "\n"
+                + "Access to this resource is disabled."
+            )
+            raise DisabledRepoError(message, response) from e
+
+        elif error_code == "RepoNotFound" or (
+            response.status_code == 401
+            and response.request is not None
+            and response.request.url is not None
+            and REPO_API_REGEX.search(response.request.url) is not None
+        ):
+            # 401 is misleading as it is returned for:
+            #    - private and gated repos if user is not authenticated
+            #    - missing repos
+            # => for now, we process them as `RepoNotFound` anyway.
+            # See https://gist.github.com/Wauplin/46c27ad266b15998ce56a6603796f0b9
+            message = (
+                f"{response.status_code} Client Error."
+                + "\n\n"
+                + f"Repository Not Found for url: {response.url}."
+                + "\nPlease make sure you specified the correct `repo_id` and"
+                " `repo_type`.\nIf you are trying to access a private or gated repo,"
+                " make sure you are authenticated."
+            )
+            raise RepositoryNotFoundError(message, response) from e
+
+        elif response.status_code == 400:
+            message = (
+                f"\n\nBad request for {endpoint_name} endpoint:" if endpoint_name is not None else "\n\nBad request:"
+            )
+            raise BadRequestError(message, response=response) from e
+
+        elif response.status_code == 403:
+            message = (
+                f"\n\n{response.status_code} Forbidden: {error_message}."
+                + f"\nCannot access content at: {response.url}."
+                + "\nIf you are trying to create or update content, "
+                + "make sure you have a token with the `write` role."
+            )
+            raise HfHubHTTPError(message, response=response) from e
+
+        # Convert `HTTPError` into a `HfHubHTTPError` to display request information
+        # as well (request id and/or server error message)
+        raise HfHubHTTPError(str(e), response=response) from e
+
+
+def _format_error_message(message: str, request_id: Optional[str], server_message: Optional[str]) -> str:
+    """
+    Format the `HfHubHTTPError` error message based on initial message and information
+    returned by the server.
+
+    Used when initializing `HfHubHTTPError`.
+    """
+    # Add message from response body
+    if server_message is not None and len(server_message) > 0 and server_message.lower() not in message.lower():
+        if "\n\n" in message:
+            message += "\n" + server_message
+        else:
+            message += "\n\n" + server_message
+
+    # Add Request ID
+    if request_id is not None and str(request_id).lower() not in message.lower():
+        request_id_message = f" (Request ID: {request_id})"
+        if "\n" in message:
+            newline_index = message.index("\n")
+            message = message[:newline_index] + request_id_message + message[newline_index:]
+        else:
+            message += request_id_message
+
+    return message
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py
new file mode 100644
index 0000000000000000000000000000000000000000..34141eba09123c06fbca55c929a19a0264e5788e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_experimental.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+# Copyright 2023-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to flag a feature as "experimental" in Huggingface Hub."""
+
+import warnings
+from functools import wraps
+from typing import Callable
+
+from .. import constants
+
+
+def experimental(fn: Callable) -> Callable:
+    """Decorator to flag a feature as experimental.
+
+    An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
+    Warnings can be disabled by setting the environment variable `HF_HUB_DISABLE_EXPERIMENTAL_WARNING` to `1`.
+
+    Args:
+        fn (`Callable`):
+            The function to flag as experimental.
+
+    Returns:
+        `Callable`: The decorated function.
+
+    Example:
+
+    ```python
+    >>> from huggingface_hub.utils import experimental
+
+    >>> @experimental
+    ... def my_function():
+    ...     print("Hello world!")
+
+    >>> my_function()
+    UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. You can disable
+    this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable.
+    Hello world!
+    ```
+    """
+    # For classes, put the "experimental" around the "__new__" method => __new__ will be removed in warning message
+    name = fn.__qualname__[: -len(".__new__")] if fn.__qualname__.endswith(".__new__") else fn.__qualname__
+
+    @wraps(fn)
+    def _inner_fn(*args, **kwargs):
+        if not constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING:
+            warnings.warn(
+                f"'{name}' is experimental and might be subject to breaking changes in the future."
+                " You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment"
+                " variable.",
+                UserWarning,
+            )
+        return fn(*args, **kwargs)
+
+    return _inner_fn
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1edcbc1eeedc8d89a8c9b9ff8a4cbe4371ce2576
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_fixes.py
@@ -0,0 +1,93 @@
+# JSONDecodeError was introduced in requests=2.27 released in 2022.
+# This allows us to support older requests for users
+# More information: https://github.com/psf/requests/pull/5856
+try:
+    from requests import JSONDecodeError  # type: ignore  # noqa: F401
+except ImportError:
+    try:
+        from simplejson import JSONDecodeError  # type: ignore  # noqa: F401
+    except ImportError:
+        from json import JSONDecodeError  # type: ignore  # noqa: F401
+import contextlib
+import os
+import shutil
+import stat
+import tempfile
+from functools import partial
+from pathlib import Path
+from typing import Callable, Generator, Optional, Union
+
+import yaml
+from filelock import BaseFileLock, FileLock
+
+
+# Wrap `yaml.dump` to set `allow_unicode=True` by default.
+#
+# Example:
+# ```py
+# >>> yaml.dump({"emoji": "👀", "some unicode": "日本か"})
+# 'emoji: "\\U0001F440"\nsome unicode: "\\u65E5\\u672C\\u304B"\n'
+#
+# >>> yaml_dump({"emoji": "👀", "some unicode": "日本か"})
+# 'emoji: "👀"\nsome unicode: "日本か"\n'
+# ```
+yaml_dump: Callable[..., str] = partial(yaml.dump, stream=None, allow_unicode=True)  # type: ignore
+
+
+@contextlib.contextmanager
+def SoftTemporaryDirectory(
+    suffix: Optional[str] = None,
+    prefix: Optional[str] = None,
+    dir: Optional[Union[Path, str]] = None,
+    **kwargs,
+) -> Generator[Path, None, None]:
+    """
+    Context manager to create a temporary directory and safely delete it.
+
+    If tmp directory cannot be deleted normally, we set the WRITE permission and retry.
+    If cleanup still fails, we give up but don't raise an exception. This is equivalent
+    to `tempfile.TemporaryDirectory(..., ignore_cleanup_errors=True)` introduced in
+    Python 3.10.
+
+    See https://www.scivision.dev/python-tempfile-permission-error-windows/.
+    """
+    tmpdir = tempfile.TemporaryDirectory(prefix=prefix, suffix=suffix, dir=dir, **kwargs)
+    yield Path(tmpdir.name).resolve()
+
+    try:
+        # First attempt: a normal cleanup
+        shutil.rmtree(tmpdir.name)
+    except Exception:
+        # If it failed, set the write permission and retry
+        try:
+            shutil.rmtree(tmpdir.name, onerror=_set_write_permission_and_retry)
+        except Exception:
+            pass
+
+    # And finally, cleanup the tmpdir.
+    # If it fails again, give up but do not throw error
+    try:
+        tmpdir.cleanup()
+    except Exception:
+        pass
+
+
+def _set_write_permission_and_retry(func, path, excinfo):
+    os.chmod(path, stat.S_IWRITE)
+    func(path)
+
+
+@contextlib.contextmanager
+def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, None]:
+    lock = FileLock(lock_file)
+    lock.acquire()
+
+    yield lock
+
+    try:
+        return lock.release()
+    except OSError:
+        try:
+            Path(lock_file).unlink()
+        except OSError:
+            pass
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8ed77f4e49ca88ff4fa9aba48cbf00195036013
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_git_credential.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to manage Git credentials."""
+
+import re
+import subprocess
+from typing import List, Optional
+
+from ..constants import ENDPOINT
+from ._subprocess import run_interactive_subprocess, run_subprocess
+
+
+GIT_CREDENTIAL_REGEX = re.compile(
+    r"""
+        ^\s*  # start of line
+        credential\.helper  # credential.helper value
+        \s*=\s*  # separator
+        (\w+)  # the helper name (group 1)
+        (\s|$)  # whitespace or end of line
+    """,
+    flags=re.MULTILINE | re.IGNORECASE | re.VERBOSE,
+)
+
+
+def list_credential_helpers(folder: Optional[str] = None) -> List[str]:
+    """Return the list of git credential helpers configured.
+
+    See https://git-scm.com/docs/gitcredentials.
+
+    Parses the output of "`git config --list`" to find the configured helpers
+    (store, cache, macOS keychain,...).
+
+    Args:
+        folder (`str`, *optional*):
+            The folder in which to check the configured helpers.
+    """
+    try:
+        output = run_subprocess("git config --list", folder=folder).stdout
+        parsed = _parse_credential_output(output)
+        return parsed
+    except subprocess.CalledProcessError as exc:
+        raise EnvironmentError(exc.stderr)
+
+
+def set_git_credential(token: str, username: str = "hf_user", folder: Optional[str] = None) -> None:
+    """Save a username/token pair in git credential for HF Hub registry.
+
+    Credentials are saved in all configured helpers (store, cache, macOS keychain,...).
+    Calls "`git credential approve`" internally. See https://git-scm.com/docs/git-credential.
+
+    Args:
+        username (`str`, defaults to `"hf_user"`):
+            A git username. Defaults to `"hf_user"`, the default user used in the Hub.
+        token (`str`):
+            A git password. In practice, the User Access Token for the Hub.
+            See https://huggingface.co/settings/tokens.
+        folder (`str`, *optional*):
+            The folder in which to check the configured helpers.
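+
+    Example (a minimal sketch; `hf_***` stands for a real User Access Token):
+    ```py
+    >>> from huggingface_hub.utils import set_git_credential
+    >>> set_git_credential(token="hf_***")  # stores the pair in every configured helper
+    ```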
+    """
+    with run_interactive_subprocess("git credential approve", folder=folder) as (
+        stdin,
+        _,
+    ):
+        stdin.write(f"url={ENDPOINT}\nusername={username.lower()}\npassword={token}\n\n")
+        stdin.flush()
+
+
+def unset_git_credential(username: str = "hf_user", folder: Optional[str] = None) -> None:
+    """Erase credentials from git credential for HF Hub registry.
+
+    Credentials are erased from the configured helpers (store, cache, macOS
+    keychain,...), if any. If `username` is not provided, any credential configured for
+    HF Hub endpoint is erased.
+    Calls "`git credential reject`" internally. See https://git-scm.com/docs/git-credential.
+
+    Args:
+        username (`str`, defaults to `"hf_user"`):
+            A git username. Defaults to `"hf_user"`, the default user used in the Hub.
+        folder (`str`, *optional*):
+            The folder in which to check the configured helpers.
+    """
+    with run_interactive_subprocess("git credential reject", folder=folder) as (
+        stdin,
+        _,
+    ):
+        standard_input = f"url={ENDPOINT}\n"
+        if username is not None:
+            standard_input += f"username={username.lower()}\n"
+        standard_input += "\n"
+
+        stdin.write(standard_input)
+        stdin.flush()
+
+
+def _parse_credential_output(output: str) -> List[str]:
+    """Parse the output of `git config --list` to extract the names of the configured credential helpers.
+
+    Args:
+        output (`str`):
+            The output of `git config --list`.
+    """
+    # NOTE: If the user has set a helper for a custom URL, it will not be caught here.
+    # Example: `credential.https://huggingface.co.helper=store`
+    # See: https://github.com/huggingface/huggingface_hub/pull/1138#discussion_r1013324508
+    return sorted(  # Sort for nice printing
+        set(  # Might have some duplicates
+            match[0] for match in GIT_CREDENTIAL_REGEX.findall(output)
+        )
+    )
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdcaf06e9d19de202a6e84b8bde212d4482d1b07
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py
@@ -0,0 +1,241 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to handle headers to send in calls to Huggingface Hub."""
+
+from typing import Dict, Optional, Union
+
+from ..
import constants +from ._runtime import ( + get_fastai_version, + get_fastcore_version, + get_hf_hub_version, + get_python_version, + get_tf_version, + get_torch_version, + is_fastai_available, + is_fastcore_available, + is_tf_available, + is_torch_available, +) +from ._token import get_token +from ._validators import validate_hf_hub_args + + +class LocalTokenNotFoundError(EnvironmentError): + """Raised if local token is required but not found.""" + + +@validate_hf_hub_args +def build_hf_headers( + *, + token: Optional[Union[bool, str]] = None, + is_write_action: bool = False, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, + headers: Optional[Dict[str, str]] = None, +) -> Dict[str, str]: + """ + Build headers dictionary to send in a HF Hub call. + + By default, authorization token is always provided either from argument (explicit + use) or retrieved from the cache (implicit use). To explicitly avoid sending the + token to the Hub, set `token=False` or set the `HF_HUB_DISABLE_IMPLICIT_TOKEN` + environment variable. + + In case of an API call that requires write access, an error is thrown if token is + `None` or token is an organization token (starting with `"api_org***"`). + + In addition to the auth header, a user-agent is added to provide information about + the installed packages (versions of python, huggingface_hub, torch, tensorflow, + fastai and fastcore). + + Args: + token (`str`, `bool`, *optional*): + The token to be sent in authorization header for the Hub call: + - if a string, it is used as the Hugging Face token + - if `True`, the token is read from the machine (cache or env variable) + - if `False`, authorization header is not set + - if `None`, the token is read from the machine only except if + `HF_HUB_DISABLE_IMPLICIT_TOKEN` env variable is set. + is_write_action (`bool`, default to `False`): + Set to True if the API call requires a write access. If `True`, the token + will be validated (cannot be `None`, cannot start by `"api_org***"`). + library_name (`str`, *optional*): + The name of the library that is making the HTTP request. Will be added to + the user-agent header. + library_version (`str`, *optional*): + The version of the library that is making the HTTP request. Will be added + to the user-agent header. + user_agent (`str`, `dict`, *optional*): + The user agent info in the form of a dictionary or a single string. It will + be completed with information about the installed packages. + headers (`dict`, *optional*): + Additional headers to include in the request. Those headers take precedence + over the ones generated by this function. + + Returns: + A `Dict` of headers to pass in your API call. + + Example: + ```py + >>> build_hf_headers(token="hf_***") # explicit token + {"authorization": "Bearer hf_***", "user-agent": ""} + + >>> build_hf_headers(token=True) # explicitly use cached token + {"authorization": "Bearer hf_***",...} + + >>> build_hf_headers(token=False) # explicitly don't use cached token + {"user-agent": ...} + + >>> build_hf_headers() # implicit use of the cached token + {"authorization": "Bearer hf_***",...} + + # HF_HUB_DISABLE_IMPLICIT_TOKEN=True # to set as env variable + >>> build_hf_headers() # token is not sent + {"user-agent": ...} + + >>> build_hf_headers(token="api_org_***", is_write_action=True) + ValueError: You must use your personal account token for write-access methods. 
+ + >>> build_hf_headers(library_name="transformers", library_version="1.2.3") + {"authorization": ..., "user-agent": "transformers/1.2.3; hf_hub/0.10.2; python/3.10.4; tensorflow/1.55"} + ``` + + Raises: + [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + If organization token is passed and "write" access is required. + [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + If "write" access is required but token is not passed and not saved locally. + [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError) + If `token=True` but token is not saved locally. + """ + # Get auth token to send + token_to_send = get_token_to_send(token) + _validate_token_to_send(token_to_send, is_write_action=is_write_action) + + # Combine headers + hf_headers = { + "user-agent": _http_user_agent( + library_name=library_name, + library_version=library_version, + user_agent=user_agent, + ) + } + if token_to_send is not None: + hf_headers["authorization"] = f"Bearer {token_to_send}" + if headers is not None: + hf_headers.update(headers) + return hf_headers + + +def get_token_to_send(token: Optional[Union[bool, str]]) -> Optional[str]: + """Select the token to send from either `token` or the cache.""" + # Case token is explicitly provided + if isinstance(token, str): + return token + + # Case token is explicitly forbidden + if token is False: + return None + + # Token is not provided: we get it from local cache + cached_token = get_token() + + # Case token is explicitly required + if token is True: + if cached_token is None: + raise LocalTokenNotFoundError( + "Token is required (`token=True`), but no token found. You" + " need to provide a token or be logged in to Hugging Face with" + " `huggingface-cli login` or `huggingface_hub.login`. See" + " https://huggingface.co/settings/tokens." + ) + return cached_token + + # Case implicit use of the token is forbidden by env variable + if constants.HF_HUB_DISABLE_IMPLICIT_TOKEN: + return None + + # Otherwise: we use the cached token as the user has not explicitly forbidden it + return cached_token + + +def _validate_token_to_send(token: Optional[str], is_write_action: bool) -> None: + if is_write_action: + if token is None: + raise ValueError( + "Token is required (write-access action) but no token found. You need" + " to provide a token or be logged in to Hugging Face with" + " `huggingface-cli login` or `huggingface_hub.login`. See" + " https://huggingface.co/settings/tokens." + ) + if token.startswith("api_org"): + raise ValueError( + "You must use your personal account token for write-access methods. To" + " generate a write-access token, go to" + " https://huggingface.co/settings/tokens" + ) + + +def _http_user_agent( + *, + library_name: Optional[str] = None, + library_version: Optional[str] = None, + user_agent: Union[Dict, str, None] = None, +) -> str: + """Format a user-agent string containing information about the installed packages. + + Args: + library_name (`str`, *optional*): + The name of the library that is making the HTTP request. + library_version (`str`, *optional*): + The version of the library that is making the HTTP request. + user_agent (`str`, `dict`, *optional*): + The user agent info in the form of a dictionary or a single string. + + Returns: + The formatted user-agent string. 
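+
+    Example (illustrative output; actual versions depend on the environment):
+    ```py
+    >>> _http_user_agent(library_name="transformers", library_version="4.30.0")
+    'transformers/4.30.0; hf_hub/0.20.3; python/3.10.4; torch/2.1.0'
+    ```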
+ """ + if library_name is not None: + ua = f"{library_name}/{library_version}" + else: + ua = "unknown/None" + ua += f"; hf_hub/{get_hf_hub_version()}" + ua += f"; python/{get_python_version()}" + + if not constants.HF_HUB_DISABLE_TELEMETRY: + if is_torch_available(): + ua += f"; torch/{get_torch_version()}" + if is_tf_available(): + ua += f"; tensorflow/{get_tf_version()}" + if is_fastai_available(): + ua += f"; fastai/{get_fastai_version()}" + if is_fastcore_available(): + ua += f"; fastcore/{get_fastcore_version()}" + + if isinstance(user_agent, dict): + ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) + elif isinstance(user_agent, str): + ua += "; " + user_agent + + return _deduplicate_user_agent(ua) + + +def _deduplicate_user_agent(user_agent: str) -> str: + """Deduplicate redundant information in the generated user-agent.""" + # Split around ";" > Strip whitespaces > Store as dict keys (ensure unicity) > format back as string + # Order is implicitly preserved by dictionary structure (see https://stackoverflow.com/a/53657523). + return "; ".join({key.strip(): None for key in user_agent.split(";")}.keys()) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py new file mode 100644 index 0000000000000000000000000000000000000000..502b22658b44d2221b535cbd943348bb93213245 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contain helper class to retrieve/store token from/to local cache.""" + +import warnings +from pathlib import Path +from typing import Optional + +from .. import constants +from ._token import get_token + + +class HfFolder: + path_token = Path(constants.HF_TOKEN_PATH) + # Private attribute. Will be removed in v0.15 + _old_path_token = Path(constants._OLD_HF_TOKEN_PATH) + + # TODO: deprecate when adapted in transformers/datasets/gradio + # @_deprecate_method(version="1.0", message="Use `huggingface_hub.login` instead.") + @classmethod + def save_token(cls, token: str) -> None: + """ + Save token, creating folder as needed. + + Token is saved in the huggingface home folder. You can configure it by setting + the `HF_HOME` environment variable. + + Args: + token (`str`): + The token to save to the [`HfFolder`] + """ + cls.path_token.parent.mkdir(parents=True, exist_ok=True) + cls.path_token.write_text(token) + + # TODO: deprecate when adapted in transformers/datasets/gradio + # @_deprecate_method(version="1.0", message="Use `huggingface_hub.get_token` instead.") + @classmethod + def get_token(cls) -> Optional[str]: + """ + Get token or None if not existent. + + This method is deprecated in favor of [`huggingface_hub.get_token`] but is kept for backward compatibility. + Its behavior is the same as [`huggingface_hub.get_token`]. 
+
+        Returns:
+            `str` or `None`: The token, `None` if it doesn't exist.
+        """
+        # 0. Check if the token exists in the old path but not in the new location
+        try:
+            cls._copy_to_new_path_and_warn()
+        except Exception:  # if not possible (e.g. PermissionError), do not raise
+            pass
+
+        return get_token()
+
+    # TODO: deprecate when adapted in transformers/datasets/gradio
+    # @_deprecate_method(version="1.0", message="Use `huggingface_hub.logout` instead.")
+    @classmethod
+    def delete_token(cls) -> None:
+        """
+        Deletes the token from storage. Does not fail if the token does not exist.
+        """
+        try:
+            cls.path_token.unlink()
+        except FileNotFoundError:
+            pass
+
+        try:
+            cls._old_path_token.unlink()
+        except FileNotFoundError:
+            pass
+
+    @classmethod
+    def _copy_to_new_path_and_warn(cls):
+        if cls._old_path_token.exists() and not cls.path_token.exists():
+            cls.save_token(cls._old_path_token.read_text())
+            warnings.warn(
+                f"A token has been found in `{cls._old_path_token}`. This is the old"
+                " path where tokens were stored. The new location is"
+                f" `{cls.path_token}` which is configurable using the `HF_HOME` environment"
+                " variable. Your token has been copied to this new location. You can"
+                " now safely delete the old token file manually or use"
+                " `huggingface-cli logout`."
+            )
diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_http.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_http.py
new file mode 100644
index 0000000000000000000000000000000000000000..081d84a4f455e5b47c2f0f6483550dc1c8ac7a36
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_http.py
@@ -0,0 +1,321 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to handle HTTP requests in Huggingface Hub."""
+
+import io
+import os
+import threading
+import time
+import uuid
+from functools import lru_cache
+from http import HTTPStatus
+from typing import Callable, Optional, Tuple, Type, Union
+
+import requests
+from requests import Response
+from requests.adapters import HTTPAdapter
+from requests.models import PreparedRequest
+
+from .. import constants
+from . import logging
+from ._typing import HTTP_METHOD_T
+
+
+logger = logging.get_logger(__name__)
+
+# Both headers are used by the Hub to debug failed requests.
+# `X_AMZN_TRACE_ID` is better as it also works to debug on Cloudfront and ALB.
+# If `X_AMZN_TRACE_ID` is set, the Hub will use it as well.
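+# `X_AMZN_TRACE_ID` is populated per-request by `UniqueRequestIdAdapter.add_headers`
+# below (reusing the `x-request-id` value if one is already set).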
+X_AMZN_TRACE_ID = "X-Amzn-Trace-Id" +X_REQUEST_ID = "x-request-id" + + +class OfflineModeIsEnabled(ConnectionError): + """Raised when a request is made but `HF_HUB_OFFLINE=1` is set as an environment variable.""" + + +class UniqueRequestIdAdapter(HTTPAdapter): + X_AMZN_TRACE_ID = "X-Amzn-Trace-Id" + + def add_headers(self, request, **kwargs): + super().add_headers(request, **kwargs) + + # Add random request ID => easier for server-side debug + if X_AMZN_TRACE_ID not in request.headers: + request.headers[X_AMZN_TRACE_ID] = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4()) + + # Add debug log + has_token = str(request.headers.get("authorization", "")).startswith("Bearer hf_") + logger.debug( + f"Request {request.headers[X_AMZN_TRACE_ID]}: {request.method} {request.url} (authenticated: {has_token})" + ) + + def send(self, request: PreparedRequest, *args, **kwargs) -> Response: + """Catch any RequestException to append the request id to the error message for debugging.""" + try: + return super().send(request, *args, **kwargs) + except requests.RequestException as e: + request_id = request.headers.get(X_AMZN_TRACE_ID) + if request_id is not None: + # Taken from https://stackoverflow.com/a/58270258 + e.args = (*e.args, f"(Request ID: {request_id})") + raise + + +class OfflineAdapter(HTTPAdapter): + def send(self, request: PreparedRequest, *args, **kwargs) -> Response: + raise OfflineModeIsEnabled( + f"Cannot reach {request.url}: offline mode is enabled. To disable it, please unset the `HF_HUB_OFFLINE` environment variable." + ) + + +def _default_backend_factory() -> requests.Session: + session = requests.Session() + if constants.HF_HUB_OFFLINE: + session.mount("http://", OfflineAdapter()) + session.mount("https://", OfflineAdapter()) + else: + session.mount("http://", UniqueRequestIdAdapter()) + session.mount("https://", UniqueRequestIdAdapter()) + return session + + +BACKEND_FACTORY_T = Callable[[], requests.Session] +_GLOBAL_BACKEND_FACTORY: BACKEND_FACTORY_T = _default_backend_factory + + +def configure_http_backend(backend_factory: BACKEND_FACTORY_T = _default_backend_factory) -> None: + """ + Configure the HTTP backend by providing a `backend_factory`. Any HTTP calls made by `huggingface_hub` will use a + Session object instantiated by this factory. This can be useful if you are running your scripts in a specific + environment requiring custom configuration (e.g. custom proxies or certificates). + + Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe, + `huggingface_hub` creates one Session instance per thread. They are all instantiated using the same `backend_factory` + set in [`configure_http_backend`]. An LRU cache is used to cache the created sessions (and connections) between + calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned. + + See [this issue](https://github.com/psf/requests/issues/2766) to learn more about thread-safety in `requests`.
+ + Example: + ```py + import requests + from huggingface_hub import configure_http_backend, get_session + + # Create a factory function that returns a Session with configured proxies + def backend_factory() -> requests.Session: + session = requests.Session() + session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"} + return session + + # Set it as the default session factory + configure_http_backend(backend_factory=backend_factory) + + # In practice, this is mostly done internally in `huggingface_hub` + session = get_session() + ``` + """ + global _GLOBAL_BACKEND_FACTORY + _GLOBAL_BACKEND_FACTORY = backend_factory + reset_sessions() + + +def get_session() -> requests.Session: + """ + Get a `requests.Session` object, using the session factory from the user. + + Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe, + `huggingface_hub` creates one Session instance per thread. They are all instantiated using the same `backend_factory` + set in [`configure_http_backend`]. An LRU cache is used to cache the created sessions (and connections) between + calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned. + + See [this issue](https://github.com/psf/requests/issues/2766) to learn more about thread-safety in `requests`. + + Example: + ```py + import requests + from huggingface_hub import configure_http_backend, get_session + + # Create a factory function that returns a Session with configured proxies + def backend_factory() -> requests.Session: + session = requests.Session() + session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"} + return session + + # Set it as the default session factory + configure_http_backend(backend_factory=backend_factory) + + # In practice, this is mostly done internally in `huggingface_hub` + session = get_session() + ``` + """ + return _get_session_from_cache(process_id=os.getpid(), thread_id=threading.get_ident()) + + +def reset_sessions() -> None: + """Reset the cache of sessions. + + Mostly used internally when sessions are reconfigured or an SSLError is raised. + See [`configure_http_backend`] for more details. + """ + _get_session_from_cache.cache_clear() + + +@lru_cache +def _get_session_from_cache(process_id: int, thread_id: int) -> requests.Session: + """ + Create a new session per thread using the global factory. Using an LRU cache (maxsize 128) to avoid memory leaks when + using thousands of threads. Cache is cleared when `configure_http_backend` is called. + """ + return _GLOBAL_BACKEND_FACTORY() + + +def http_backoff( + method: HTTP_METHOD_T, + url: str, + *, + max_retries: int = 5, + base_wait_time: float = 1, + max_wait_time: float = 8, + retry_on_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = ( + requests.Timeout, + requests.ConnectionError, + ), + retry_on_status_codes: Union[int, Tuple[int, ...]] = HTTPStatus.SERVICE_UNAVAILABLE, + **kwargs, +) -> Response: + """Wrapper around requests to retry calls on an endpoint, with exponential backoff. + + Endpoint call is retried on exceptions (ex: connection timeout, proxy error,...) + and/or on specific status codes (ex: service unavailable). If the call fails more + than `max_retries` times, the exception is thrown or `raise_for_status` is called on the + response object. + + Re-implements mechanisms from the `backoff` library to avoid adding an external + dependency to `huggingface_hub`. See https://github.com/litl/backoff.
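+ + With these parameters, the wait time before retry `n` is `min(max_wait_time, base_wait_time * 2 ** (n - 1))` + seconds, i.e. 1s, 2s, 4s, 8s, 8s with the defaults.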
+ + Args: + method (`Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]`): + HTTP method to perform. + url (`str`): + The URL of the resource to fetch. + max_retries (`int`, *optional*, defaults to `5`): + Maximum number of retries before giving up. + base_wait_time (`float`, *optional*, defaults to `1`): + Duration (in seconds) to wait before retrying the first time. + Wait time between retries then grows exponentially, capped by + `max_wait_time`. + max_wait_time (`float`, *optional*, defaults to `8`): + Maximum duration (in seconds) to wait before retrying. + retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception], ...]`, *optional*): + Define which exceptions must be caught to retry the request. Can be a single type or a tuple of types. + By default, retry on `requests.Timeout` and `requests.ConnectionError`. + retry_on_status_codes (`int` or `Tuple[int, ...]`, *optional*, defaults to `503`): + Define on which status codes the request must be retried. By default, only + HTTP 503 Service Unavailable is retried. + **kwargs (`dict`, *optional*): + kwargs to pass to `requests.request`. + + Example: + ``` + >>> from huggingface_hub.utils import http_backoff + + # Same usage as "requests.request". + >>> response = http_backoff("GET", "https://www.google.com") + >>> response.raise_for_status() + + # If you expect a Gateway Timeout from time to time + >>> response = http_backoff("PUT", upload_url, data=data, retry_on_status_codes=504) + >>> response.raise_for_status() + ``` + + <Tip warning={true}> + + When using `requests` it is possible to stream data by passing an iterator to the + `data` argument. With http backoff this is a problem as the iterator is not reset + after a failed call. This issue is mitigated for file objects or any IO streams + by saving the initial position of the cursor (with `data.tell()`) and resetting the + cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff + will fail. If this is a hard constraint for you, please let us know by opening an + issue on [GitHub](https://github.com/huggingface/huggingface_hub). + + </Tip> + """ + if isinstance(retry_on_exceptions, type): # Tuple from single exception type + retry_on_exceptions = (retry_on_exceptions,) + + if isinstance(retry_on_status_codes, int): # Tuple from single status code + retry_on_status_codes = (retry_on_status_codes,) + + nb_tries = 0 + sleep_time = base_wait_time + + # If `data` is used and is a file object (or any IO), it will be consumed on the + # first HTTP request. We need to save the initial position so that the full content + # of the file is re-sent on http backoff. See warning tip in docstring. + io_obj_initial_pos = None + if "data" in kwargs and isinstance(kwargs["data"], io.IOBase): + io_obj_initial_pos = kwargs["data"].tell() + + session = get_session() + while True: + nb_tries += 1 + try: + # If `data` is used and is a file object (or any IO), set back cursor to + # initial position. + if io_obj_initial_pos is not None: + kwargs["data"].seek(io_obj_initial_pos) + + # Perform request and return if status_code is not in the retry list.
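+ # (With the defaults, up to 6 attempts are made in total: the initial call plus 5 retries.)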
+ response = session.request(method=method, url=url, **kwargs) + if response.status_code not in retry_on_status_codes: + return response + + # Wrong status code returned (HTTP 503 for instance) + logger.warning(f"HTTP Error {response.status_code} thrown while requesting {method} {url}") + if nb_tries > max_retries: + response.raise_for_status() # Will raise uncaught exception + # We return the response to avoid an infinite loop in the corner case where the + # user asks for retry on a status code that doesn't raise_for_status. + return response + + except retry_on_exceptions as err: + logger.warning(f"'{err}' thrown while requesting {method} {url}") + + if isinstance(err, requests.ConnectionError): + reset_sessions() # In case of SSLError it's best to reset the shared requests.Session objects + + if nb_tries > max_retries: + raise err + + # Sleep for X seconds + logger.warning(f"Retrying in {sleep_time}s [Retry {nb_tries}/{max_retries}].") + time.sleep(sleep_time) + + # Update sleep time for next retry + sleep_time = min(max_wait_time, sleep_time * 2) # Exponential backoff + + +def fix_hf_endpoint_in_url(url: str, endpoint: Optional[str]) -> str: + """Replace the default endpoint in a URL with a custom one. + + This is useful when using a proxy and the Hugging Face Hub returns a URL with the default endpoint. + """ + endpoint = endpoint or constants.ENDPOINT + # check if a proxy has been set => if yes, update the returned URL to use the proxy + if endpoint not in (None, constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT): + url = url.replace(constants._HF_DEFAULT_ENDPOINT, endpoint) + url = url.replace(constants._HF_DEFAULT_STAGING_ENDPOINT, endpoint) + return url diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py new file mode 100644 index 0000000000000000000000000000000000000000..f7ab4fe7cba9bd13f01d9c81854a00fd30b7f0d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_pagination.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to handle pagination on Huggingface Hub.""" + +from typing import Dict, Iterable, Optional + +import requests + +from . import get_session, hf_raise_for_status, logging + + +logger = logging.get_logger(__name__) + + +def paginate(path: str, params: Dict, headers: Dict) -> Iterable: + """Fetch a list of models/datasets/spaces and paginate through results. + + This uses the same "Link" header format as GitHub.
+ See: + - https://requests.readthedocs.io/en/latest/api/#requests.Response.links + - https://docs.github.com/en/rest/guides/traversing-with-pagination#link-header + """ + session = get_session() + r = session.get(path, params=params, headers=headers) + hf_raise_for_status(r) + yield from r.json() + + # Follow pages + # Next link already contains query params + next_page = _get_next_page(r) + while next_page is not None: + logger.debug(f"Pagination detected. Requesting next page: {next_page}") + r = session.get(next_page, headers=headers) + hf_raise_for_status(r) + yield from r.json() + next_page = _get_next_page(r) + + +def _get_next_page(response: requests.Response) -> Optional[str]: + return response.links.get("next", {}).get("url") diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..411d7d52bb3edc2be948d85267e21ce2be91d460 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_paths.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains utilities to handle paths in Huggingface Hub.""" + +from fnmatch import fnmatch +from pathlib import Path +from typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union + + +T = TypeVar("T") + +IGNORE_GIT_FOLDER_PATTERNS = [".git", ".git/*", "*/.git", "**/.git/**"] + + +def filter_repo_objects( + items: Iterable[T], + *, + allow_patterns: Optional[Union[List[str], str]] = None, + ignore_patterns: Optional[Union[List[str], str]] = None, + key: Optional[Callable[[T], str]] = None, +) -> Generator[T, None, None]: + """Filter repo objects based on an allowlist and a denylist. + + Input must be a list of paths (`str` or `Path`) or a list of arbitrary objects. + In the latter case, `key` must be provided and specifies a function of one argument + that is used to extract a path from each element in the iterable. + + Patterns are Unix shell-style wildcards which are NOT regular expressions. See + https://docs.python.org/3/library/fnmatch.html for more details. + + Args: + items (`Iterable`): + List of items to filter. + allow_patterns (`str` or `List[str]`, *optional*): + Patterns constituting the allowlist. If provided, item paths must match at + least one pattern from the allowlist. + ignore_patterns (`str` or `List[str]`, *optional*): + Patterns constituting the denylist. If provided, item paths must not match + any patterns from the denylist. + key (`Callable[[T], str]`, *optional*): + Single-argument function to extract a path from each item. If not provided, + the `items` must already be `str` or `Path`. + + Returns: + Filtered list of objects, as a generator. + + Raises: + :class:`ValueError`: + If `key` is not provided and items are not `str` or `Path`. + + Example usage with paths: + ```python + >>> # Filter only PDFs that are not hidden.
+ >>> list(filter_repo_objects( + ... ["aaa.pdf", "bbb.jpg", ".ccc.pdf", ".ddd.png"], + ... allow_patterns=["*.pdf"], + ... ignore_patterns=[".*"], + ... )) + ["aaa.pdf"] + ``` + + Example usage with objects: + ```python + >>> list(filter_repo_objects( + ... [ + ... CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf"), + ... CommitOperationAdd(path_or_fileobj="/tmp/bbb.jpg", path_in_repo="bbb.jpg"), + ... CommitOperationAdd(path_or_fileobj="/tmp/.ccc.pdf", path_in_repo=".ccc.pdf"), + ... CommitOperationAdd(path_or_fileobj="/tmp/.ddd.png", path_in_repo=".ddd.png"), + ... ], + ... allow_patterns=["*.pdf"], + ... ignore_patterns=[".*"], + ... key=lambda x: x.path_in_repo, + ... )) + [CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")] + ``` + """ + if isinstance(allow_patterns, str): + allow_patterns = [allow_patterns] + + if isinstance(ignore_patterns, str): + ignore_patterns = [ignore_patterns] + + if key is None: + + def _identity(item: T) -> str: + if isinstance(item, str): + return item + if isinstance(item, Path): + return str(item) + raise ValueError(f"Please provide `key` argument in `filter_repo_objects`: `{item}` is not a string.") + + key = _identity # Items must be `str` or `Path`, otherwise raise ValueError + + for item in items: + path = key(item) + + # Skip if there's an allowlist and path doesn't match any + if allow_patterns is not None and not any(fnmatch(path, r) for r in allow_patterns): + continue + + # Skip if there's a denylist and path matches any + if ignore_patterns is not None and any(fnmatch(path, r) for r in ignore_patterns): + continue + + yield item diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py new file mode 100644 index 0000000000000000000000000000000000000000..d37e8f76fee25976d48ad591fe8afb277ae6ef38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py @@ -0,0 +1,124 @@ +import functools +import operator +from collections import defaultdict +from dataclasses import dataclass, field +from typing import Dict, List, Literal, Optional, Tuple + + +FILENAME_T = str +TENSOR_NAME_T = str +DTYPE_T = Literal["F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL"] + + +class SafetensorsParsingError(Exception): + """Raised when failing to parse a safetensors file metadata. + + This can be the case if the file is not a safetensors file or does not respect the specification. + """ + + +class NotASafetensorsRepoError(Exception): + """Raised when a repo is not a Safetensors repo, i.e. doesn't have either a `model.safetensors` or a + `model.safetensors.index.json` file. + """ + + +@dataclass +class TensorInfo: + """Information about a tensor. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + dtype (`str`): + The data type of the tensor ("F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL"). + shape (`List[int]`): + The shape of the tensor. + data_offsets (`Tuple[int, int]`): + The offsets of the data in the file as a tuple `[BEGIN, END]`. + parameter_count (`int`): + The number of parameters in the tensor.
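+ + Example (illustrative, derived from `__post_init__` below): + ```py + >>> TensorInfo(dtype="F32", shape=[10, 20], data_offsets=(0, 800)).parameter_count + 200 + ```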
+ """ + + dtype: DTYPE_T + shape: List[int] + data_offsets: Tuple[int, int] + parameter_count: int = field(init=False) + + def __post_init__(self) -> None: + # Taken from https://stackoverflow.com/a/13840436 + try: + self.parameter_count = functools.reduce(operator.mul, self.shape) + except TypeError: + self.parameter_count = 1 # scalar value has no shape + + +@dataclass +class SafetensorsFileMetadata: + """Metadata for a Safetensors file hosted on the Hub. + + This class is returned by [`parse_safetensors_file_metadata`]. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + metadata (`Dict`): + The metadata contained in the file. + tensors (`Dict[str, TensorInfo]`): + A map of all tensors. Keys are tensor names and values are information about the corresponding tensor, as a + [`TensorInfo`] object. + parameter_count (`Dict[str, int]`): + A map of the number of parameters per data type. Keys are data types and values are the number of parameters + of that data type. + """ + + metadata: Dict[str, str] + tensors: Dict[TENSOR_NAME_T, TensorInfo] + parameter_count: Dict[DTYPE_T, int] = field(init=False) + + def __post_init__(self) -> None: + parameter_count: Dict[DTYPE_T, int] = defaultdict(int) + for tensor in self.tensors.values(): + parameter_count[tensor.dtype] += tensor.parameter_count + self.parameter_count = dict(parameter_count) + + +@dataclass +class SafetensorsRepoMetadata: + """Metadata for a Safetensors repo. + + A repo is considered to be a Safetensors repo if it contains either a 'model.safetensors' weight file (non-sharded + model) or a 'model.safetensors.index.json' index file (sharded model) at its root. + + This class is returned by [`get_safetensors_metadata`]. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + metadata (`Dict`, *optional*): + The metadata contained in the 'model.safetensors.index.json' file, if it exists. Only populated for sharded + models. + sharded (`bool`): + Whether the repo contains a sharded model or not. + weight_map (`Dict[str, str]`): + A map of all weights. Keys are tensor names and values are filenames of the files containing the tensors. + files_metadata (`Dict[str, SafetensorsFileMetadata]`): + A map of all files metadata. Keys are filenames and values are the metadata of the corresponding file, as + a [`SafetensorsFileMetadata`] object. + parameter_count (`Dict[str, int]`): + A map of the number of parameters per data type. Keys are data types and values are the number of parameters + of that data type.
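+ + Example (illustrative): for a model sharded into two files of 100 BF16 parameters each, the + aggregated `parameter_count` is `{"BF16": 200}`.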
+ """ + + metadata: Optional[Dict] + sharded: bool + weight_map: Dict[TENSOR_NAME_T, FILENAME_T] # tensor name -> filename + files_metadata: Dict[FILENAME_T, SafetensorsFileMetadata] # filename -> metadata + parameter_count: Dict[DTYPE_T, int] = field(init=False) + + def __post_init__(self) -> None: + parameter_count: Dict[DTYPE_T, int] = defaultdict(int) + for file_metadata in self.files_metadata.values(): + for dtype, nb_parameters_ in file_metadata.parameter_count.items(): + parameter_count[dtype] += nb_parameters_ + self.parameter_count = dict(parameter_count) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..a09e0d58868ecd699ea3a3e503a8a702d25c8ea5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_subprocess.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +"""Contains utilities to easily handle subprocesses in `huggingface_hub`.""" + +import os +import subprocess +import sys +from contextlib import contextmanager +from io import StringIO +from pathlib import Path +from typing import IO, Generator, List, Optional, Tuple, Union + +from .logging import get_logger + + +logger = get_logger(__name__) + + +@contextmanager +def capture_output() -> Generator[StringIO, None, None]: + """Capture output that is printed to terminal. + + Taken from https://stackoverflow.com/a/34738440 + + Example: + ```py + >>> with capture_output() as output: + ... print("hello world") + >>> assert output.getvalue() == "hello world\n" + ``` + """ + output = StringIO() + previous_output = sys.stdout + sys.stdout = output + yield output + sys.stdout = previous_output + + +def run_subprocess( + command: Union[str, List[str]], + folder: Optional[Union[str, Path]] = None, + check=True, + **kwargs, +) -> subprocess.CompletedProcess: + """ + Method to run subprocesses. Calling this will capture `stdout` and `stderr`; call + `subprocess.run` directly if you do not want them to be captured. + + Args: + command (`str` or `List[str]`): + The command to execute as a string or list of strings. + folder (`str`, *optional*): + The folder in which to run the command. Defaults to current working + directory (from `os.getcwd()`). + check (`bool`, *optional*, defaults to `True`): + Setting `check` to `True` will raise a `subprocess.CalledProcessError` + when the subprocess has a non-zero exit code. + kwargs (`Dict[str]`): + Keyword arguments to be passed to the `subprocess.run` underlying command. + + Returns: + `subprocess.CompletedProcess`: The completed process.
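+ + Example (a minimal sketch; assumes `git` is installed and the folder is a git repo): + ```py + >>> proc = run_subprocess("git rev-parse HEAD", folder=".") + >>> print(proc.stdout.strip()) + ```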
+ """ + if isinstance(command, str): + command = command.split() + + if isinstance(folder, Path): + folder = str(folder) + + return subprocess.run( + command, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + check=check, + encoding="utf-8", + errors="replace", # if not utf-8, replace char by � + cwd=folder or os.getcwd(), + **kwargs, + ) + + +@contextmanager +def run_interactive_subprocess( + command: Union[str, List[str]], + folder: Optional[Union[str, Path]] = None, + **kwargs, +) -> Generator[Tuple[IO[str], IO[str]], None, None]: + """Run a subprocess in an interactive mode in a context manager. + + Args: + command (`str` or `List[str]`): + The command to execute as a string or list of strings. + folder (`str`, *optional*): + The folder in which to run the command. Defaults to current working + directory (from `os.getcwd()`). + kwargs (`Dict[str]`): + Keyword arguments to be passed to the `subprocess.run` underlying command. + + Returns: + `Tuple[IO[str], IO[str]]`: A tuple with `stdin` and `stdout` to interact + with the process (input and output are utf-8 encoded). + + Example: + ```python + with run_interactive_subprocess("git credential-store get") as (stdin, stdout): + # Write to stdin (streams are already opened in utf-8 text mode) + stdin.write("url=hf.co\nusername=obama\n") + stdin.flush() + + # Read from stdout + output = stdout.read() + ``` + """ + if isinstance(command, str): + command = command.split() + + with subprocess.Popen( + command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + errors="replace", # if not utf-8, replace char by � + cwd=folder or os.getcwd(), + **kwargs, + ) as process: + assert process.stdin is not None, "subprocess is opened as subprocess.PIPE" + assert process.stdout is not None, "subprocess is opened as subprocess.PIPE" + yield process.stdin, process.stdout diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_token.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_token.py new file mode 100644 index 0000000000000000000000000000000000000000..3218bb45c0737f67912c9c257734c463f5871255 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_token.py @@ -0,0 +1,130 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains a helper to get the token from the machine (environment variable, secret, or config file).""" + +import os +import warnings +from pathlib import Path +from threading import Lock +from typing import Optional + +from .. import constants +from ._runtime import is_google_colab + + +_IS_GOOGLE_COLAB_CHECKED = False +_GOOGLE_COLAB_SECRET_LOCK = Lock() +_GOOGLE_COLAB_SECRET: Optional[str] = None + + +def get_token() -> Optional[str]: + """ + Get token if user is logged in. + + Note: in most cases, you should use [`huggingface_hub.utils.build_hf_headers`] instead. This method is only useful + if you want to retrieve the token for purposes other than sending an HTTP request.
+ + Token is retrieved in priority from the `HF_TOKEN` environment variable. Otherwise, we read the token file located + in the Hugging Face home folder. Returns None if user is not logged in. To log in, use [`login`] or + `huggingface-cli login`. + + Returns: + `str` or `None`: The token, `None` if it doesn't exist. + """ + return _get_token_from_google_colab() or _get_token_from_environment() or _get_token_from_file() + + +def _get_token_from_google_colab() -> Optional[str]: + """Get token from Google Colab secrets vault using `google.colab.userdata.get(...)`. + + Token is read from the vault only once per session and then stored in a global variable to avoid re-requesting + access to the vault. + """ + if not is_google_colab(): + return None + + # `google.colab.userdata` is not thread-safe + # This can lead to a deadlock if multiple threads try to access it at the same time + # (typically when using `snapshot_download`) + # => use a lock + # See https://github.com/huggingface/huggingface_hub/issues/1952 for more details. + with _GOOGLE_COLAB_SECRET_LOCK: + global _GOOGLE_COLAB_SECRET + global _IS_GOOGLE_COLAB_CHECKED + + if _IS_GOOGLE_COLAB_CHECKED: # request access only once + return _GOOGLE_COLAB_SECRET + + try: + from google.colab import userdata + from google.colab.errors import Error as ColabError + except ImportError: + return None + + try: + token = userdata.get("HF_TOKEN") + _GOOGLE_COLAB_SECRET = _clean_token(token) + except userdata.NotebookAccessError: + # Means the user has a secret called `HF_TOKEN`, got a popup "please grant access to HF_TOKEN" and refused it + # => warn the user but ignore the error => do not re-request access + warnings.warn( + "\nAccess to the secret `HF_TOKEN` has not been granted on this notebook." + "\nYou will not be requested again." + "\nPlease restart the session if you want to be prompted again." + ) + _GOOGLE_COLAB_SECRET = None + except userdata.SecretNotFoundError: + # Means the user did not define a `HF_TOKEN` secret => warn + warnings.warn( + "\nThe secret `HF_TOKEN` does not exist in your Colab secrets." + "\nTo authenticate with the Hugging Face Hub, create a token in your settings tab " + "(https://huggingface.co/settings/tokens), set it as a secret in your Google Colab and restart your session." + "\nYou will be able to reuse this secret in all of your notebooks." + "\nPlease note that authentication is recommended but still optional to access public models or datasets." + ) + _GOOGLE_COLAB_SECRET = None + except ColabError as e: + # Something happened but we don't know what => recommend opening a GitHub issue + warnings.warn( + f"\nError while fetching `HF_TOKEN` secret value from your vault: '{str(e)}'." + "\nYou are not authenticated with the Hugging Face Hub in this notebook." + "\nIf the error persists, please let us know by opening an issue on GitHub " + "(https://github.com/huggingface/huggingface_hub/issues/new)."
+ ) + _GOOGLE_COLAB_SECRET = None + + _IS_GOOGLE_COLAB_CHECKED = True + return _GOOGLE_COLAB_SECRET + + +def _get_token_from_environment() -> Optional[str]: + # `HF_TOKEN` has priority (keep `HUGGING_FACE_HUB_TOKEN` for backward compatibility) + return _clean_token(os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN")) + + +def _get_token_from_file() -> Optional[str]: + try: + return _clean_token(Path(constants.HF_TOKEN_PATH).read_text()) + except FileNotFoundError: + return None + + +def _clean_token(token: Optional[str]) -> Optional[str]: + """Clean token by removing trailing and leading spaces and newlines. + + If token is an empty string, return None. + """ + if token is None: + return None + return token.replace("\r", "").replace("\n", "").strip() or None diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ae502b825bcb900ea076ffe1b0fe1078569821c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Handle typing imports based on system compatibility.""" + +from typing import Any, Callable, Literal, TypeVar + + +HTTP_METHOD_T = Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"] + +# type hint meaning "function signature not changed by decorator" +CallableT = TypeVar("CallableT", bound=Callable) + +_JSON_SERIALIZABLE_TYPES = (int, float, str, bool, type(None)) + + +def is_jsonable(obj: Any) -> bool: + """Check if an object is JSON serializable. + + This is a weak check, as it does not check for the actual JSON serialization, but only for the types of the object. + It works correctly for basic use cases but does not guarantee an exhaustive check.
+ + Object is considered to be recursively json serializable if: + - it is an instance of int, float, str, bool, or NoneType + - it is a list or tuple and all its items are json serializable + - it is a dict and all its keys are strings and all its values are json serializable + """ + try: + if isinstance(obj, _JSON_SERIALIZABLE_TYPES): + return True + if isinstance(obj, (list, tuple)): + return all(is_jsonable(item) for item in obj) + if isinstance(obj, dict): + return all(isinstance(key, str) and is_jsonable(value) for key, value in obj.items()) + if hasattr(obj, "__json__"): + return True + return False + except RecursionError: + return False diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py new file mode 100644 index 0000000000000000000000000000000000000000..f232ee0adcfc52dcc18b5ea4d9c913b206521f71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py @@ -0,0 +1,34 @@ +# Taken from https://github.com/mlflow/mlflow/pull/10119 +# +# DO NOT use this function for security purposes (e.g., password hashing). +# +# In Python >= 3.9, insecure hashing algorithms such as MD5 fail in FIPS-compliant +# environments unless `usedforsecurity=False` is explicitly passed. +# +# References: +# - https://github.com/mlflow/mlflow/issues/9905 +# - https://github.com/mlflow/mlflow/pull/10119 +# - https://docs.python.org/3/library/hashlib.html +# - https://github.com/huggingface/transformers/pull/27038 +# +# Usage: +# ```python +# # Use +# from huggingface_hub.utils.insecure_hashlib import sha256 +# # instead of +# from hashlib import sha256 +# +# # Use +# from huggingface_hub.utils import insecure_hashlib +# # instead of +# import hashlib +# ``` +import functools +import hashlib +import sys + + +_kwargs = {"usedforsecurity": False} if sys.version_info >= (3, 9) else {} +md5 = functools.partial(hashlib.md5, **_kwargs) +sha1 = functools.partial(hashlib.sha1, **_kwargs) +sha256 = functools.partial(hashlib.sha256, **_kwargs) diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/logging.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..3aafdf148135397556b4bb762862377eafdafd14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/logging.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
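+# NOTE: the default verbosity below can be overridden with the `HF_HUB_VERBOSITY` environment +# variable (one of: debug, info, warning, error, critical), e.g. `HF_HUB_VERBOSITY=debug`.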
+"""Logging utilities.""" + +import logging +import os +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Optional + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _get_default_logging_level(): + """ + If the `HF_HUB_VERBOSITY` env var is set to one of the valid choices, return that as the new default level. + If it is not, fall back to `_default_log_level`. + """ + env_level_str = os.getenv("HF_HUB_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option HF_HUB_VERBOSITY={env_level_str}, has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _configure_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(logging.StreamHandler()) + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Returns a logger with the specified name. This function is not supposed + to be directly accessed by library users. + + Args: + name (`str`, *optional*): + The name of the logger to get, usually the filename + + Example: + + ```python + >>> from huggingface_hub.utils.logging import get_logger, set_verbosity_info + + >>> logger = get_logger(__file__) + >>> set_verbosity_info() + ``` + """ + + if name is None: + name = _get_library_name() + + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the HuggingFace Hub's root logger. + + Returns: + Logging level, e.g., `huggingface_hub.logging.DEBUG` and + `huggingface_hub.logging.INFO`. + + <Tip> + + HuggingFace Hub has the following logging levels: + + - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL` + - `huggingface_hub.logging.ERROR` + - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN` + - `huggingface_hub.logging.INFO` + - `huggingface_hub.logging.DEBUG` + + </Tip> + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Sets the level for the HuggingFace Hub's root logger. + + Args: + verbosity (`int`): + Logging level, e.g., `huggingface_hub.logging.DEBUG` and + `huggingface_hub.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """ + Sets the verbosity to `logging.INFO`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """ + Sets the verbosity to `logging.WARNING`. + """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """ + Sets the verbosity to `logging.DEBUG`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """ + Sets the verbosity to `logging.ERROR`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. Note that log propagation is + disabled by default.
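+ + Example (illustrative): + ```py + >>> from huggingface_hub.utils import logging + >>> logging.disable_propagation()  # library logs no longer bubble up to the root logger + ```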
+ """ + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the + HuggingFace Hub's default handler to prevent double logging if the root + logger has been configured. + """ + _get_library_root_logger().propagate = True + + +_configure_library_root_logger() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/sha.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/sha.py new file mode 100644 index 0000000000000000000000000000000000000000..233ab074e69a47de9a443a458ce44e1429a1e22c --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/sha.py @@ -0,0 +1,29 @@ +"""Utilities to efficiently compute the SHA-256 hash of a bunch of bytes.""" + +from typing import BinaryIO, Optional + +from .insecure_hashlib import sha256 + + +def sha_fileobj(fileobj: BinaryIO, chunk_size: Optional[int] = None) -> bytes: + """ + Computes the sha256 hash of the given file object, by chunks of size `chunk_size`. + + Args: + fileobj (file-like object): + The file object to compute sha256 for, typically obtained with `open(path, "rb")` + chunk_size (`int`, *optional*): + The number of bytes to read from `fileobj` at once, defaults to 1MB. + + Returns: + `bytes`: `fileobj`'s sha256 hash as bytes + """ + chunk_size = chunk_size if chunk_size is not None else 1024 * 1024 + + sha = sha256() + while True: + chunk = fileobj.read(chunk_size) + sha.update(chunk) + if not chunk: + break + return sha.digest() diff --git a/venv/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py b/venv/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..da1e421a21e65c04b7c53efd8f95d8df4f663473 --- /dev/null +++ b/venv/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +"""Utility helpers to handle progress bars in `huggingface_hub`. + +Example: + 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`. + 2. To disable progress bars, either use `disable_progress_bars()` helper or set the + environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1. + 3. To re-enable progress bars, use `enable_progress_bars()`. + 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`. + +NOTE: The environment variable `HF_HUB_DISABLE_PROGRESS_BARS` takes priority. + +Example: + ```py + from huggingface_hub.utils import ( + are_progress_bars_disabled, + disable_progress_bars, + enable_progress_bars, + tqdm, + ) + + # Disable progress bars globally + disable_progress_bars() + + # Use as normal `tqdm` + for _ in tqdm(range(5)): + do_something() + + # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+ for _ in tqdm(range(5), disable=False): + do_something() + + are_progress_bars_disabled() # True + + # Re-enable progress bars globally + enable_progress_bars() + + # Progress bars will be shown! + for _ in tqdm(range(5)): + do_something() + ``` +""" + +import io +import warnings +from contextlib import contextmanager +from pathlib import Path +from typing import Iterator, Optional, Union + +from tqdm.auto import tqdm as old_tqdm + +from ..constants import HF_HUB_DISABLE_PROGRESS_BARS + + +# `HF_HUB_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_hub_progress_bars_disabled` +# is a `bool`. If `HF_HUB_DISABLE_PROGRESS_BARS` is set to True or False, it has priority. +# If `HF_HUB_DISABLE_PROGRESS_BARS` is None, it means the user has not set the +# environment variable and is free to enable/disable progress bars programmatically. +# TL;DR: env variable has priority over code. +# +# By default, progress bars are enabled. +_hf_hub_progress_bars_disabled: bool = HF_HUB_DISABLE_PROGRESS_BARS or False + + +def disable_progress_bars() -> None: + """ + Globally disable progress bars used in `huggingface_hub`, except if the `HF_HUB_DISABLE_PROGRESS_BARS` environment + variable has been set. + + Use [`~utils.enable_progress_bars`] to re-enable them. + """ + if HF_HUB_DISABLE_PROGRESS_BARS is False: + warnings.warn( + "Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has" + " priority." + ) + return + global _hf_hub_progress_bars_disabled + _hf_hub_progress_bars_disabled = True + + +def enable_progress_bars() -> None: + """ + Globally enable progress bars used in `huggingface_hub`, except if the `HF_HUB_DISABLE_PROGRESS_BARS` environment + variable has been set. + + Use [`~utils.disable_progress_bars`] to disable them. + """ + if HF_HUB_DISABLE_PROGRESS_BARS is True: + warnings.warn( + "Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has" + " priority." + ) + return + global _hf_hub_progress_bars_disabled + _hf_hub_progress_bars_disabled = False + + +def are_progress_bars_disabled() -> bool: + """Return whether progress bars are globally disabled or not. + + Progress bars used in `huggingface_hub` can be enabled or disabled globally using [`~utils.enable_progress_bars`] + and [`~utils.disable_progress_bars`] or by setting the `HF_HUB_DISABLE_PROGRESS_BARS` environment variable. + """ + global _hf_hub_progress_bars_disabled + return _hf_hub_progress_bars_disabled + + +class tqdm(old_tqdm): + """ + Class to override the `disable` argument in case progress bars are globally disabled. + + Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324. + """ + + def __init__(self, *args, **kwargs): + if are_progress_bars_disabled(): + kwargs["disable"] = True + super().__init__(*args, **kwargs) + + def __delattr__(self, attr: str) -> None: + """Fix for https://github.com/huggingface/huggingface_hub/issues/1603""" + try: + super().__delattr__(attr) + except AttributeError: + if attr != "_lock": + raise + + +@contextmanager +def tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]: + """ + Open a file as binary and wrap the `read` method to display a progress bar when it's streamed. + + First implemented in `transformers` in 2019 but removed when switched to git-lfs. Used in `huggingface_hub` to show + a progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608 + for implementation details.
+ + Note: the current implementation only handles files stored on disk, as that is the most common use case. It could be + extended to stream any `BinaryIO` object, but we might have to debug some corner cases. + + Example: + ```py + >>> with tqdm_stream_file("config.json") as f: + >>> requests.put(url, data=f) + config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s] + ``` + """ + if isinstance(path, str): + path = Path(path) + + with path.open("rb") as f: + total_size = path.stat().st_size + pbar = tqdm( + unit="B", + unit_scale=True, + total=total_size, + initial=0, + desc=path.name, + ) + + f_read = f.read + + def _inner_read(size: Optional[int] = -1) -> bytes: + data = f_read(size) + pbar.update(len(data)) + return data + + f.read = _inner_read # type: ignore + + yield f + + pbar.close() diff --git a/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3fb59225af83edc8ed40a93bcc87cf6df248b6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a46c154b9d2487df47de9329348826f8167a6eaf Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8c1c615e80ff0f44147e7f91b933a12b14a9668 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/arrays/__init__.py b/venv/lib/python3.10/site-packages/pandas/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a11755275d00e070bea6ab73a881b98d0b976551 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/arrays/__init__.py @@ -0,0 +1,53 @@ +""" +All of pandas' ExtensionArrays. + +See :ref:`extending.extension-types` for more. +""" +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + BooleanArray, + Categorical, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + StringArray, + TimedeltaArray, +) + +__all__ = [ + "ArrowExtensionArray", + "ArrowStringArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "SparseArray", + "StringArray", + "TimedeltaArray", +] + + +def __getattr__(name: str) -> type[NumpyExtensionArray]: + if name == "PandasArray": + # GH#53694 + import warnings + + from pandas.util._exceptions import find_stack_level + + warnings.warn( + "PandasArray has been renamed NumpyExtensionArray. Use that " + "instead. 
This alias will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return NumpyExtensionArray + raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/venv/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..406d01a5172fc95b6dc8fda819dc42c036cee7f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..268c00e0364506fa384d9b019001de22beee5332 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..014c08420ff310d41f114d611d3b4c3743f0aa51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA @@ -0,0 +1,370 @@ +Metadata-Version: 2.1 +Name: pathvalidate +Version: 3.2.0 +Summary: pathvalidate is a Python library to sanitize/validate a string such as filenames/file-paths/etc. 
+Home-page: https://github.com/thombashi/pathvalidate +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Documentation, https://pathvalidate.rtfd.io/ +Project-URL: Source, https://github.com/thombashi/pathvalidate +Project-URL: Tracker, https://github.com/thombashi/pathvalidate/issues +Project-URL: Changelog, https://github.com/thombashi/pathvalidate/releases +Keywords: file,path,validation,validator,sanitization,sanitizer +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Filesystems +Classifier: Topic :: Text Processing +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: docs +Requires-Dist: sphinx-rtd-theme >=1.2.2 ; extra == 'docs' +Requires-Dist: Sphinx >=2.4 ; extra == 'docs' +Requires-Dist: urllib3 <2 ; extra == 'docs' +Provides-Extra: test +Requires-Dist: allpairspy >=2 ; extra == 'test' +Requires-Dist: click >=6.2 ; extra == 'test' +Requires-Dist: Faker >=1.0.8 ; extra == 'test' +Requires-Dist: pytest >=6.0.1 ; extra == 'test' +Requires-Dist: pytest-md-report >=0.4.1 ; extra == 'test' +Requires-Dist: pytest-discord >=0.1.4 ; (python_version >= "3.7") and extra == 'test' + +.. contents:: **pathvalidate** + :backlinks: top + :depth: 2 + +Summary +========= +`pathvalidate <https://github.com/thombashi/pathvalidate>`__ is a Python library to sanitize/validate a string such as filenames/file-paths/etc. + +.. image:: https://badge.fury.io/py/pathvalidate.svg + :target: https://badge.fury.io/py/pathvalidate + :alt: PyPI package version + +.. image:: https://anaconda.org/thombashi/pathvalidate/badges/version.svg + :target: https://anaconda.org/thombashi/pathvalidate + :alt: conda package version + +.. image:: https://img.shields.io/pypi/pyversions/pathvalidate.svg + :target: https://pypi.org/project/pathvalidate + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/pathvalidate.svg + :target: https://pypi.org/project/pathvalidate + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/pathvalidate/workflows/Tests/badge.svg + :target: https://github.com/thombashi/pathvalidate/actions?query=workflow%3ATests + :alt: Linux/macOS/Windows CI status + +.. image:: https://coveralls.io/repos/github/thombashi/pathvalidate/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/pathvalidate?branch=master + :alt: Test coverage: coveralls + +.. 
image:: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql/badge.svg
+   :target: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql
+   :alt: CodeQL
+
+Features
+---------
+- Sanitize/Validate a string as a:
+    - file name
+    - file path
+- Sanitize will:
+    - Remove invalid characters for a target platform
+    - Replace reserved names for a target platform
+    - Normalize
+    - Remove unprintable characters
+- Argument validator/sanitizer for ``argparse`` and ``click``
+- Multi platform support:
+    - ``Linux``
+    - ``Windows``
+    - ``macOS``
+    - ``POSIX``
+    - ``universal`` (platform independent)
+- Multibyte character support
+
+Examples
+==========
+Sanitize a filename
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        from pathvalidate import sanitize_filename
+
+        fname = "fi:l*e/p\"a?t>h|.t<e>s*t.txt"
+        print(f"{fname} -> {sanitize_filename(fname)}\n")
+
+        fname = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
+        print(f"{fname} -> {sanitize_filename(fname)}\n")
+
+:Output:
+    .. code-block::
+
+        fi:l*e/p"a?t>h|.t<e>s*t.txt -> filepath.test.txt
+
+        _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f(g)h+i_0.txt
+
+The default target ``platform`` is ``universal``,
+i.e. the sanitized file name is valid for any platform.
+
+Sanitize a filepath
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        from pathvalidate import sanitize_filepath
+
+        fpath = "fi:l*e/p\"a?t>h|.t<e>s*t.txt"
+        print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
+
+        fpath = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
+        print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
+
+:Output:
+    .. code-block::
+
+        fi:l*e/p"a?t>h|.t<e>s*t.txt -> file/path.test.txt
+
+        _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f/(g)h+i_0.txt
+
+Validate a filename
+---------------------
+:Sample Code:
+    .. code-block:: python
+
+        import sys
+        from pathvalidate import ValidationError, validate_filename
+
+        try:
+            validate_filename("fi:l*e/p\"a?t>h|.t<e>s*t.txt")
+        except ValidationError as e:
+            print(f"{e}\n", file=sys.stderr)
+
+:Output:
+    .. code-block::
+
+        [PV1100] invalid characters found: invalids=(':', '*', '/', '"', '?', '>', '|', '<'), value='fi:l*e/p"a?t>h|.t<e>s*t.txt', platform=universal
+
+filename/filepath validator for ``click``
+-------------------------------------------
+:Sample Code:
+    .. code-block:: python
+
+        import click
+
+        from pathvalidate.click import validate_filename_arg, validate_filepath_arg
+
+
+        @click.command()
+        @click.option("--filename", callback=validate_filename_arg)
+        @click.option("--filepath", callback=validate_filepath_arg)
+        def cli(filename: str, filepath: str) -> None:
+            if filename:
+                click.echo(f"filename: {filename}")
+            if filepath:
+                click.echo(f"filepath: {filepath}")
+
+
+        if __name__ == "__main__":
+            cli()
+
+:Output:
+    .. code-block::
+
+        $ ./examples/click_validate.py --filename ab
+        filename: ab
+        $ ./examples/click_validate.py --filepath e?g
+        Usage: click_validate.py [OPTIONS]
+        Try 'click_validate.py --help' for help.
+
+        Error: Invalid value for '--filepath': [PV1100] invalid characters found: invalids=('?'), value='e?g', platform=Windows
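+
+filename/filepath validator/sanitizer for ``argparse``
+--------------------------------------------------------
+``pathvalidate.argparse`` provides the ``argparse`` counterparts of the ``click`` helpers below.
+A minimal sketch, assuming the ``validate_*_arg``/``sanitize_*_arg`` helpers take a string and return the validated or sanitized value (mirroring the ``click`` callbacks shown in this document):
+
+:Sample Code:
+    .. code-block:: python
+
+        from argparse import ArgumentParser
+
+        from pathvalidate.argparse import sanitize_filename_arg, validate_filename_arg
+
+        # sketch: validate_filename_arg rejects an invalid file name with an
+        # argument error, while sanitize_filename_arg removes invalid characters
+        parser = ArgumentParser()
+        parser.add_argument("--filename", type=validate_filename_arg)
+        parser.add_argument("--sanitized-filename", type=sanitize_filename_arg)
+        options = parser.parse_args()
+
+        if options.filename:
+            print(f"filename: {options.filename}")
+        if options.sanitized_filename:
+            print(f"sanitized filename: {options.sanitized_filename}")
+
+filename/filepath sanitizer for ``click``
+-------------------------------------------
+:Sample Code:
+    .. code-block:: python
+
+        import click
+
+        from pathvalidate.click import sanitize_filename_arg, sanitize_filepath_arg
+
+
+        @click.command()
+        @click.option("--filename", callback=sanitize_filename_arg)
+        @click.option("--filepath", callback=sanitize_filepath_arg)
+        def cli(filename, filepath):
+            if filename:
+                click.echo(f"filename: {filename}")
+            if filepath:
+                click.echo(f"filepath: {filepath}")
+
+
+        if __name__ == "__main__":
+            cli()
+
+:Output:
+    .. 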
code-block:: + + $ ./examples/click_sanitize.py --filename a/b + filename: ab + +For more information +---------------------- +More examples can be found at +https://pathvalidate.rtfd.io/en/latest/pages/examples/index.html + +Installation +============ +Installation: pip +------------------------------ +:: + + pip install pathvalidate + +Installation: conda +------------------------------ +:: + + conda install -c thombashi pathvalidate + +Installation: apt +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-pathvalidate + + +Dependencies +============ +Python 3.7+ +no external dependencies. + +Documentation +=============== +https://pathvalidate.rtfd.io/ + +Sponsors +==================================== +.. image:: https://avatars.githubusercontent.com/u/44389260?s=48&u=6da7176e51ae2654bcfd22564772ef8a3bb22318&v=4 + :target: https://github.com/chasbecker + :alt: Charles Becker (chasbecker) +.. image:: https://avatars.githubusercontent.com/u/9919?s=48&v=4 + :target: https://github.com/github + :alt: onetime: GitHub (github) +.. image:: https://avatars.githubusercontent.com/u/46711571?s=48&u=57687c0e02d5d6e8eeaf9177f7b7af4c9f275eb5&v=4 + :target: https://github.com/Arturi0 + :alt: onetime: Arturi0 +.. image:: https://avatars.githubusercontent.com/u/3658062?s=48&v=4 + :target: https://github.com/b4tman + :alt: onetime: Dmitry Belyaev (b4tman) + +`Become a sponsor `__ + diff --git a/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..472d39f5440216b77e622c081c7c20454a991771 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD @@ -0,0 +1,35 @@ +pathvalidate-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pathvalidate-3.2.0.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084 +pathvalidate-3.2.0.dist-info/METADATA,sha256=Kc0RTAOHjVPeTIb-Fv8g162B0RcyDzI_Jj2nD9J8Gdk,11747 +pathvalidate-3.2.0.dist-info/RECORD,, +pathvalidate-3.2.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +pathvalidate-3.2.0.dist-info/top_level.txt,sha256=AtoiECsrk-xZknk3ruLi-UweWuXhbKeEGDWFwMcK_ks,13 +pathvalidate/__init__.py,sha256=R8x0yEBF3dfwpTlGe1TJZ9XgOmO-tKGoEvpZgNA83Ys,1926 +pathvalidate/__pycache__/__init__.cpython-310.pyc,, +pathvalidate/__pycache__/__version__.cpython-310.pyc,, +pathvalidate/__pycache__/_base.cpython-310.pyc,, +pathvalidate/__pycache__/_common.cpython-310.pyc,, +pathvalidate/__pycache__/_const.cpython-310.pyc,, +pathvalidate/__pycache__/_filename.cpython-310.pyc,, +pathvalidate/__pycache__/_filepath.cpython-310.pyc,, +pathvalidate/__pycache__/_ltsv.cpython-310.pyc,, +pathvalidate/__pycache__/_symbol.cpython-310.pyc,, +pathvalidate/__pycache__/_types.cpython-310.pyc,, +pathvalidate/__pycache__/argparse.cpython-310.pyc,, +pathvalidate/__pycache__/click.cpython-310.pyc,, +pathvalidate/__pycache__/error.cpython-310.pyc,, +pathvalidate/__pycache__/handler.cpython-310.pyc,, +pathvalidate/__version__.py,sha256=R8MJHDvfFVYjKEFUDzFulsQ9h1EhLDaHtPVwKRedF-E,201 +pathvalidate/_base.py,sha256=NsynjO1IqYaG6rTbGkMx77OIfcUGSv51jLvMvIyyA1A,7443 +pathvalidate/_common.py,sha256=4JLadI56z-1xST0kfgjtiGMWCkmdlcfdrnZn5wIg_9k,3363 +pathvalidate/_const.py,sha256=UzAu38QxKjZDJEcJ-M99sQDnSpALIK7jJoZizFptiBw,686 
+pathvalidate/_filename.py,sha256=YEhwJKEq73kLkqInYjbiagGO22q0iswiISzignbWZXE,17356 +pathvalidate/_filepath.py,sha256=z-QgwCNhy8KY6M8hK8JGeUh3YO-P4_7qAE1p9_LFSXc,18915 +pathvalidate/_ltsv.py,sha256=BuCgH-iLdptUbaghoLCXwk7DQFGBBFjuNGeDv2I0IsM,1203 +pathvalidate/_symbol.py,sha256=8kcG9D7IWCdfw3x18I8qSmA09vpHfQB2suVtMloGu28,2326 +pathvalidate/_types.py,sha256=3CRkyBkMvcPcFPigO-Kr18Z6RgGEgUdLK1cXBg8UjWc,180 +pathvalidate/argparse.py,sha256=z_z7inal8sw2wPwFjsMEMQ2zR3kACdK1qsItocXFf3Y,970 +pathvalidate/click.py,sha256=IvaOB4R7ivR3GNPGaROAzOGBcROWIIsZKADJ08hxab4,1077 +pathvalidate/error.py,sha256=t6ePXdcW3ALnv0c_iEDtjLA8hS7USopJamttH5bmnmQ,7531 +pathvalidate/handler.py,sha256=RDOka3TjLz91yqQdLirQmjhFyEt5PVepk6kmGAAes8o,3268 +pathvalidate/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..268c00e0364506fa384d9b019001de22beee5332 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e94d542bda0af891f0762350f90ed4361e75d07c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/METADATA @@ -0,0 +1,929 @@ +Metadata-Version: 2.1 +Name: pytablewriter +Version: 1.2.0 +Summary: pytablewriter is a Python library to write a table in various formats: AsciiDoc / CSV / Elasticsearch / HTML / JavaScript / JSON / LaTeX / LDJSON / LTSV / Markdown / MediaWiki / NumPy / Excel / Pandas / Python / reStructuredText / SQLite / TOML / TSV / YAML. 
+Home-page: https://github.com/thombashi/pytablewriter +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Changlog, https://github.com/thombashi/pytablewriter/releases +Project-URL: Documentation, https://pytablewriter.rtfd.io/ +Project-URL: Funding, https://github.com/sponsors/thombashi +Project-URL: Source, https://github.com/thombashi/pytablewriter +Project-URL: Tracker, https://github.com/thombashi/pytablewriter/issues +Keywords: AsciiDoc,table,CSV,Excel,JavaScript,JSON,LaTeX,LTSV,Markdown,MediaWiki,HTML,pandas,reStructuredText,SQLite,TSV,TOML +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Code Generators +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Topic :: Text Processing :: Markup :: LaTeX +Classifier: Topic :: Text Processing :: Markup :: Markdown +Classifier: Topic :: Text Processing :: Markup :: reStructuredText +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: setuptools >=38.3.0 +Requires-Dist: DataProperty <2,>=1.0.1 +Requires-Dist: mbstrdecoder <2,>=1.0.0 +Requires-Dist: pathvalidate <4,>=2.3.0 +Requires-Dist: tabledata <2,>=1.3.1 +Requires-Dist: tcolorpy <1,>=0.0.5 +Requires-Dist: typepy[datetime] <2,>=1.3.2 +Provides-Extra: all +Requires-Dist: xlwt ; extra == 'all' +Requires-Dist: XlsxWriter <4,>=0.9.6 ; extra == 'all' +Requires-Dist: elasticsearch <9,>=8.0.1 ; extra == 'all' +Requires-Dist: pytablereader <2,>=0.31.3 ; extra == 'all' +Requires-Dist: dominate <3,>=2.1.5 ; extra == 'all' +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'all' +Requires-Dist: SimpleSQLite <2,>=1.3.2 ; extra == 'all' +Requires-Dist: pytablewriter-altrow-theme <1,>=0.2.0 ; extra == 'all' +Requires-Dist: pytablewriter-altcol-theme <1,>=0.1.0 ; extra == 'all' +Requires-Dist: toml <1,>=0.9.3 ; extra == 'all' +Requires-Dist: PyYAML <7,>=3.11 ; extra == 'all' +Requires-Dist: simplejson <4,>=3.8.1 ; extra == 'all' +Requires-Dist: pandas <3,>=0.25.3 ; extra == 'all' +Provides-Extra: docs +Requires-Dist: sphinx-rtd-theme >=1.2.2 ; extra == 'docs' +Requires-Dist: Sphinx >=2.4 ; extra == 'docs' +Requires-Dist: xlwt ; extra == 'docs' +Requires-Dist: XlsxWriter <4,>=0.9.6 ; extra == 'docs' +Requires-Dist: elasticsearch <9,>=8.0.1 ; extra == 'docs' +Requires-Dist: pytablereader <2,>=0.31.3 ; extra == 'docs' +Requires-Dist: dominate <3,>=2.1.5 ; extra == 'docs' +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'docs' +Requires-Dist: SimpleSQLite <2,>=1.3.2 ; extra == 'docs' +Requires-Dist: 
pytablewriter-altrow-theme <1,>=0.2.0 ; extra == 'docs' +Requires-Dist: pytablewriter-altcol-theme <1,>=0.1.0 ; extra == 'docs' +Requires-Dist: toml <1,>=0.9.3 ; extra == 'docs' +Requires-Dist: PyYAML <7,>=3.11 ; extra == 'docs' +Requires-Dist: simplejson <4,>=3.8.1 ; extra == 'docs' +Requires-Dist: pandas <3,>=0.25.3 ; extra == 'docs' +Provides-Extra: es +Requires-Dist: elasticsearch <9,>=8.0.1 ; extra == 'es' +Provides-Extra: es8 +Requires-Dist: elasticsearch <9,>=8.0.1 ; extra == 'es8' +Provides-Extra: excel +Requires-Dist: xlwt ; extra == 'excel' +Requires-Dist: XlsxWriter <4,>=0.9.6 ; extra == 'excel' +Provides-Extra: from +Requires-Dist: pytablereader <2,>=0.31.3 ; extra == 'from' +Provides-Extra: html +Requires-Dist: dominate <3,>=2.1.5 ; extra == 'html' +Provides-Extra: logging +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'logging' +Provides-Extra: pandas +Requires-Dist: pandas <3,>=0.25.3 ; extra == 'pandas' +Provides-Extra: sqlite +Requires-Dist: SimpleSQLite <2,>=1.3.2 ; extra == 'sqlite' +Provides-Extra: test +Requires-Dist: pandas <3,>=0.25.3 ; extra == 'test' +Requires-Dist: XlsxWriter <4,>=0.9.6 ; extra == 'test' +Requires-Dist: beautifulsoup4 >=4.10 ; extra == 'test' +Requires-Dist: toml <1,>=0.9.3 ; extra == 'test' +Requires-Dist: pytablewriter-altcol-theme <1,>=0.1.0 ; extra == 'test' +Requires-Dist: pytest-md-report >=0.4.1 ; extra == 'test' +Requires-Dist: pytablereader <2,>=0.31.3 ; extra == 'test' +Requires-Dist: SimpleSQLite <2,>=1.3.2 ; extra == 'test' +Requires-Dist: dominate <3,>=2.1.5 ; extra == 'test' +Requires-Dist: pytablewriter-altrow-theme <1,>=0.2.0 ; extra == 'test' +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'test' +Requires-Dist: xlwt ; extra == 'test' +Requires-Dist: PyYAML <7,>=3.11 ; extra == 'test' +Requires-Dist: elasticsearch <9,>=8.0.1 ; extra == 'test' +Requires-Dist: tablib >=3.2.0 ; extra == 'test' +Requires-Dist: pytablereader[excel,sqlite] >=0.31.3 ; extra == 'test' +Requires-Dist: simplejson <4,>=3.8.1 ; extra == 'test' +Requires-Dist: sqliteschema >=1.3.0 ; extra == 'test' +Requires-Dist: pytest >=6.0.1 ; extra == 'test' +Provides-Extra: theme +Requires-Dist: pytablewriter-altrow-theme <1,>=0.2.0 ; extra == 'theme' +Requires-Dist: pytablewriter-altcol-theme <1,>=0.1.0 ; extra == 'theme' +Provides-Extra: toml +Requires-Dist: toml <1,>=0.9.3 ; extra == 'toml' +Provides-Extra: yaml +Requires-Dist: PyYAML <7,>=3.11 ; extra == 'yaml' + +.. contents:: **pytablewriter** + :backlinks: top + :depth: 2 + +Summary +========= +`pytablewriter `__ is a Python library to write a table in various formats: AsciiDoc / CSV / Elasticsearch / HTML / JavaScript / JSON / LaTeX / LDJSON / LTSV / Markdown / MediaWiki / NumPy / Excel / Pandas / Python / reStructuredText / SQLite / TOML / TSV / YAML. + +.. image:: https://badge.fury.io/py/pytablewriter.svg + :target: https://badge.fury.io/py/pytablewriter + :alt: PyPI package version + +.. image:: https://anaconda.org/conda-forge/pytablewriter/badges/version.svg + :target: https://anaconda.org/conda-forge/pytablewriter + :alt: conda-forge package version + +.. image:: https://img.shields.io/pypi/pyversions/pytablewriter.svg + :target: https://pypi.org/project/pytablewriter/ + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/pytablewriter.svg + :target: https://pypi.org/project/pytablewriter + :alt: Supported Python implementations + +.. 
image:: https://github.com/thombashi/pytablewriter/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/pytablewriter/actions/workflows/ci.yml + :alt: CI status of Linux/macOS/Windows + +.. image:: https://coveralls.io/repos/github/thombashi/pytablewriter/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/pytablewriter?branch=master + :alt: Test coverage + +.. image:: https://github.com/thombashi/pytablewriter/actions/workflows/github-code-scanning/codeql/badge.svg + :target: https://github.com/thombashi/pytablewriter/actions/workflows/github-code-scanning/codeql + :alt: CodeQL + +Features +-------- +- Write a table in various formats: + - Text formats: + - `AsciiDoc `__ + - CSV / Tab-separated values (TSV) / Space-separated values (SSV) + - HTML / CSS + - JSON / `Line-delimited JSON(LDJSON) `__ + - `Labeled Tab-separated Values (LTSV) `__ + - LaTeX: ``tabular``/``array`` environment + - Markdown: CommonMark / `GitHub Flavored Markdown (GFM) `__ / `kramdown `__ + - `MediaWiki `__ + - reStructuredText: `Grid Tables `__/`Simple Tables `__/`CSV Table `__ + - Source code (definition of a variable that represents tabular data) + - JavaScript / `NumPy `__ (`numpy.array `__) / `Pandas `__ (`pandas.DataFrame `__) / Python + - `TOML `__ + - `YAML `__ + - Unicode + - Binary file formats: + - Microsoft Excel :superscript:`TM` (``.xlsx``/``.xls`` file format) + - `pandas.DataFrame `__ pickle file + - `SQLite `__ database + - Application-specific formats: + - `Elasticsearch `__ +- Automatic table cell formatting: + - Alignment + - Padding + - Decimal places of numbers +- Customize table cell styles: + - Text/Background color + - Text alignment + - Font size/weight + - Thousand separator for numbers: e.g. ``1,000``/``1 000`` +- Configure output: + - Write a table to a stream such as a file/standard-output/string-buffer/Jupyter-Notebook + - Get rendered tabular text +- Data sources: + - nested list + - CSV + - `pandas.DataFrame `__ / `pandas.Series `__ + - etc. +- Multibyte character support +- ANSI color support + +Installation +============ + +Installation: pip +------------------------------ +:: + + pip install pytablewriter + +Some of the formats require additional dependency packages, you can install these packages as follows: + +.. csv-table:: Installation of optional dependencies + :header: Installation example, Remark + + ``pip install pytablewriter[es]``, Elasticsearch + ``pip install pytablewriter[excel]``, Excel + ``pip install pytablewriter[html]``, HTML + ``pip install pytablewriter[sqlite]``, SQLite database + ``pip install pytablewriter[toml]``, TOML + ``pip install pytablewriter[theme]``, pytablewriter theme plugins + ``pip install pytablewriter[all]``, Install all of the optional dependencies + +Installation: conda +------------------------------ +:: + + conda install -c conda-forge pytablewriter + +Installation: apt +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-pytablewriter + +Examples +========== +Write tables +-------------- +Write a Markdown table +~~~~~~~~~~~~~~~~~~~~~~~~ +:Sample Code: + .. 
code-block:: python
+
+        from pytablewriter import MarkdownTableWriter
+
+        def main():
+            writer = MarkdownTableWriter(
+                table_name="example_table",
+                headers=["int", "float", "str", "bool", "mix", "time"],
+                value_matrix=[
+                    [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"],
+                    [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"],
+                    [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"],
+                    [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"],
+                ],
+            )
+            writer.write_table()
+
+        if __name__ == "__main__":
+            main()
+
+:Output:
+    .. code-block::
+
+        # example_table
+        |int|float|str |bool |  mix   |          time          |
+        |--:|----:|----|-----|-------:|------------------------|
+        |  0| 0.10|hoge|True |       0|2017-01-01 03:04:05+0900|
+        |  2|-2.23|foo |False|        |2017-12-23 12:34:51+0900|
+        |  3| 0.00|bar |True |Infinity|2017-03-03 22:44:55+0900|
+        |-10|-9.90|    |False|     NaN|2017-01-01 00:00:00+0900|
+
+:Rendering Result:
+    .. figure:: https://cdn.jsdelivr.net/gh/thombashi/pytablewriter@master/docs/pages/examples/table_format/text/ss/markdown.png
+        :scale: 80%
+        :alt: https://github.com/thombashi/pytablewriter/blob/master/docs/pages/examples/table_format/text/ss/markdown.png
+
+        Rendered markdown at GitHub
+
+Write a Markdown table with margins
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:Sample Code:
+    .. code-block:: python
+
+        from pytablewriter import MarkdownTableWriter
+
+        def main():
+            writer = MarkdownTableWriter(
+                table_name="write a table with margins",
+                headers=["int", "float", "str", "bool", "mix", "time"],
+                value_matrix=[
+                    [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"],
+                    [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"],
+                    [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"],
+                    [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"],
+                ],
+                margin=1  # add a whitespace for both sides of each cell
+            )
+            writer.write_table()
+
+        if __name__ == "__main__":
+            main()
+
+:Output:
+    .. code-block::
+
+        # write a table with margins
+        | int | float | str  | bool  |   mix    |           time           |
+        | --: | ----: | ---- | ----- | -------: | ------------------------ |
+        |   0 |  0.10 | hoge | True  |        0 | 2017-01-01 03:04:05+0900 |
+        |   2 | -2.23 | foo  | False |          | 2017-12-23 12:34:51+0900 |
+        |   3 |  0.00 | bar  | True  | Infinity | 2017-03-03 22:44:55+0900 |
+        | -10 | -9.90 |      | False |      NaN | 2017-01-01 00:00:00+0900 |
+
+The ``margin`` attribute is available for all of the text-format writer classes.
+
+Write a GitHub Flavored Markdown (GFM) table
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If you set the ``flavor`` keyword argument of the ``MarkdownTableWriter`` class to ``"github"`` or ``"gfm"``, the writer outputs Markdown tables with GitHub flavor.
+GFM supports some additional table styles, such as ``fg_color`` (text color).
+
+:Sample Code:
+    .. code-block:: python
+
+        from pytablewriter import MarkdownTableWriter
+        from pytablewriter.style import Style
+
+        writer = MarkdownTableWriter(
+            column_styles=[
+                Style(fg_color="red"),
+                Style(fg_color="green", decoration_line="underline"),
+            ],
+            headers=["A", "B"],
+            value_matrix=[
+                ["abc", 1],
+                ["efg", 2],
+            ],
+            margin=1,
+            flavor="github",
+            enable_ansi_escape=False,
+        )
+        writer.write_table()
+
+Rendered results can be found `here `__
+
+Apply styles to a GFM table programmatically
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Applying style filters to GFM allows for more flexible style settings for cells.
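+For instance, a minimal sketch of such a filter (a hypothetical ``highlight_negatives`` function, built only from the ``Cell``/``Style`` API used elsewhere in this document) could look like:
+
+.. code-block:: python
+
+    from typing import Any, Optional
+
+    from pytablewriter import MarkdownTableWriter
+    from pytablewriter.style import Cell, Style
+
+
+    def highlight_negatives(cell: Cell, **kwargs: Any) -> Optional[Style]:
+        # leave the header row unstyled
+        if cell.is_header_row():
+            return None
+
+        try:
+            value = float(cell.value)
+        except (TypeError, ValueError):
+            return None
+
+        # color negative numbers; returning None keeps the default style
+        return Style(fg_color="red", font_weight="bold") if value < 0 else None
+
+
+    writer = MarkdownTableWriter(
+        headers=["item", "delta"],
+        value_matrix=[["A", 1.5], ["B", -2.75]],
+        margin=1,
+        flavor="github",
+        enable_ansi_escape=False,
+    )
+    writer.add_style_filter(highlight_negatives)
+    writer.write_table()
+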
+See also the `example <#style-filter>`_ + +Write a Markdown table to a stream or a file +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`Refer an example `__ + +Write a table to an Excel sheet +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Sample Code: + .. code-block:: python + + from pytablewriter import ExcelXlsxTableWriter + + def main(): + writer = ExcelXlsxTableWriter() + writer.table_name = "example" + writer.headers = ["int", "float", "str", "bool", "mix", "time"] + writer.value_matrix = [ + [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], + [2, "-2.23", "foo", False, None, "2017-12-23 12:34:51+0900"], + [3, 0, "bar", "true", "inf", "2017-03-03 22:44:55+0900"], + [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], + ] + writer.dump("sample.xlsx") + + if __name__ == "__main__": + main() + +:Output: + .. figure:: https://cdn.jsdelivr.net/gh/thombashi/pytablewriter@master/docs/pages/examples/table_format/binary/spreadsheet/ss/excel_single.png + :scale: 100% + :alt: https://github.com/thombashi/pytablewriter/blob/master/docs/pages/examples/table_format/binary/spreadsheet/ss/excel_single.png + + Output excel file (``sample_single.xlsx``) + +Write a Unicode table +~~~~~~~~~~~~~~~~~~~~~~~ +:Sample Code: + .. code-block:: python + + from pytablewriter import UnicodeTableWriter + + def main(): + writer = UnicodeTableWriter( + table_name="example_table", + headers=["int", "float", "str", "bool", "mix", "time"], + value_matrix=[ + [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], + [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"], + [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"], + [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], + ] + ) + writer.write_table() + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + ┌───┬─────┬────┬─────┬────────┬────────────────────────┐ + │int│float│str │bool │ mix │ time │ + ├───┼─────┼────┼─────┼────────┼────────────────────────┤ + │ 0│ 0.10│hoge│True │ 0│2017-01-01 03:04:05+0900│ + ├───┼─────┼────┼─────┼────────┼────────────────────────┤ + │ 2│-2.23│foo │False│ │2017-12-23 12:34:51+0900│ + ├───┼─────┼────┼─────┼────────┼────────────────────────┤ + │ 3│ 0.00│bar │True │Infinity│2017-03-03 22:44:55+0900│ + ├───┼─────┼────┼─────┼────────┼────────────────────────┤ + │-10│-9.90│ │False│ NaN│2017-01-01 00:00:00+0900│ + └───┴─────┴────┴─────┴────────┴────────────────────────┘ + +Write a table with JavaScript format (as a nested list variable definition) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Sample Code: + .. code-block:: python + + import pytablewriter as ptw + + + def main(): + writer = ptw.JavaScriptTableWriter( + table_name="js_variable", + headers=["int", "float", "str", "bool", "mix", "time"], + value_matrix=[ + [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], + [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"], + [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"], + [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], + ], + ) + + writer.write_table() + + + if __name__ == "__main__": + main() + +:Output: + .. 
code-block:: js + + const js_variable = [ + ["int", "float", "str", "bool", "mix", "time"], + [0, 0.1, "hoge", true, 0, "2017-01-01 03:04:05+0900"], + [2, -2.23, "foo", false, null, "2017-12-23 45:01:23+0900"], + [3, 0, "bar", true, Infinity, "2017-03-03 33:44:55+0900"], + [-10, -9.9, "", "FALSE", NaN, "2017-01-01 00:00:00+0900"] + ]; + +Write a Markdown table from ``pandas.DataFrame`` instance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``from_dataframe`` method of writer classes will set up tabular data from ``pandas.DataFrame``: + +:Sample Code: + .. code-block:: python + + from textwrap import dedent + import pandas as pd + import io + from pytablewriter import MarkdownTableWriter + + def main(): + csv_data = io.StringIO(dedent("""\ + "i","f","c","if","ifc","bool","inf","nan","mix_num","time" + 1,1.10,"aa",1.0,"1",True,Infinity,NaN,1,"2017-01-01 00:00:00+09:00" + 2,2.20,"bbb",2.2,"2.2",False,Infinity,NaN,Infinity,"2017-01-02 03:04:05+09:00" + 3,3.33,"cccc",-3.0,"ccc",True,Infinity,NaN,NaN,"2017-01-01 00:00:00+09:00" + """)) + df = pd.read_csv(csv_data, sep=',') + + writer = MarkdownTableWriter(dataframe=df) + writer.write_table() + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + | i | f | c | if |ifc|bool | inf |nan|mix_num | time | + |--:|---:|----|---:|---|-----|--------|---|-------:|-------------------------| + | 1|1.10|aa | 1.0| 1|True |Infinity|NaN| 1|2017-01-01 00:00:00+09:00| + | 2|2.20|bbb | 2.2|2.2|False|Infinity|NaN|Infinity|2017-01-02 03:04:05+09:00| + | 3|3.33|cccc|-3.0|ccc|True |Infinity|NaN| NaN|2017-01-01 00:00:00+09:00| + + +Adding a column of the DataFrame index if you specify ``add_index_column=True``: + +:Sample Code: + .. code-block:: python + + import pandas as pd + import pytablewriter as ptw + + def main(): + writer = ptw.MarkdownTableWriter(table_name="add_index_column") + writer.from_dataframe( + pd.DataFrame({"A": [1, 2], "B": [10, 11]}, index=["a", "b"]), + add_index_column=True, + ) + writer.write_table() + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + # add_index_column + | | A | B | + |---|--:|--:| + |a | 1| 10| + |b | 2| 11| + +Write a Markdown table from space-separated values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Sample Code: + .. code-block:: python + + import pytablewriter as ptw + + + def main(): + writer = ptw.MarkdownTableWriter(table_name="ps") + writer.from_csv( + """ + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 0.0 0.4 77664 8784 ? Ss May11 0:02 /sbin/init + root 2 0.0 0.0 0 0 ? S May11 0:00 [kthreadd] + root 4 0.0 0.0 0 0 ? I< May11 0:00 [kworker/0:0H] + root 6 0.0 0.0 0 0 ? I< May11 0:00 [mm_percpu_wq] + root 7 0.0 0.0 0 0 ? S May11 0:01 [ksoftirqd/0] + """, + delimiter=" ", + ) + writer.write_table() + + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + # ps + |USER|PID|%CPU|%MEM| VSZ |RSS |TTY|STAT|START|TIME| COMMAND | + |----|--:|---:|---:|----:|---:|---|----|-----|----|--------------| + |root| 1| 0| 0.4|77664|8784|? |Ss |May11|0:02|/sbin/init | + |root| 2| 0| 0.0| 0| 0|? |S |May11|0:00|[kthreadd] | + |root| 4| 0| 0.0| 0| 0|? |I< |May11|0:00|[kworker/0:0H]| + |root| 6| 0| 0.0| 0| 0|? |I< |May11|0:00|[mm_percpu_wq]| + |root| 7| 0| 0.0| 0| 0|? |S |May11|0:01|[ksoftirqd/0] | + +Get rendered tabular text as str +---------------------------------- +``dumps`` method returns rendered tabular text. +``dumps`` only available for text format writers. + +:Sample Code: + .. 
code-block:: python + + import pytablewriter as ptw + + + def main(): + writer = ptw.MarkdownTableWriter( + headers=["int", "float", "str", "bool", "mix", "time"], + value_matrix=[ + [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], + [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"], + [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"], + [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], + ], + ) + + print(writer.dumps()) + + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + |int|float|str |bool | mix | time | + |--:|----:|----|-----|-------:|------------------------| + | 0| 0.10|hoge|True | 0|2017-01-01 03:04:05+0900| + | 2|-2.23|foo |False| |2017-12-23 45:01:23+0900| + | 3| 0.00|bar |True |Infinity|2017-03-03 33:44:55+0900| + |-10|-9.90| |False| NaN|2017-01-01 00:00:00+0900| + +Configure table styles +------------------------ +Column styles +~~~~~~~~~~~~~~~ +Writers can specify +`Style `__ +for each column by ``column_styles`` attribute of writer classes. + +:Sample Code: + .. code-block:: python + + import pytablewriter as ptw + from pytablewriter.style import Style + + + def main(): + writer = ptw.MarkdownTableWriter( + table_name="set style by column_styles", + headers=[ + "auto align", + "left align", + "center align", + "bold", + "italic", + "bold italic ts", + ], + value_matrix=[ + [11, 11, 11, 11, 11, 11], + [1234, 1234, 1234, 1234, 1234, 1234], + ], + column_styles=[ + Style(), + Style(align="left"), + Style(align="center"), + Style(font_weight="bold"), + Style(font_style="italic"), + Style(font_weight="bold", font_style="italic", thousand_separator=","), + ], # specify styles for each column + ) + writer.write_table() + + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + # set style by styles + |auto align|left align|center align| bold |italic|bold italic ts| + |---------:|----------|:----------:|-------:|-----:|-------------:| + | 11|11 | 11 | **11**| _11_| _**11**_| + | 1234|1234 | 1234 |**1234**|_1234_| _**1,234**_| + + `Rendering result `__ + + +You can also set ``Style`` to a specific column with an index or header by using ``set_style`` method: + +:Sample Code: + .. code-block:: python + + from pytablewriter import MarkdownTableWriter + from pytablewriter.style import Style + + def main(): + writer = MarkdownTableWriter() + writer.headers = ["A", "B", "C",] + writer.value_matrix = [[11, 11, 11], [1234, 1234, 1234]] + + writer.table_name = "set style by column index" + writer.set_style(1, Style(align="center", font_weight="bold")) + writer.set_style(2, Style(thousand_separator=" ")) + writer.write_table() + writer.write_null_line() + + writer.table_name = "set style by header" + writer.set_style("B", Style(font_style="italic")) + writer.write_table() + + if __name__ == "__main__": + main() + +:Output: + .. code-block:: + + # set style by column index + | A | B | C | + |---:|:------:|----:| + | 11| **11** | 11| + |1234|**1234**|1 234| + + # set style by header + | A | B | C | + |---:|-----:|----:| + | 11| _11_| 11| + |1234|_1234_|1 234| + +Style filter +~~~~~~~~~~~~~~ +You can apply styles to specific cells by using style filters. +Style filters will be written as Python functions. +Examples of a style filter function and how you apply it are as follows: + +:Sample Code: + .. 
code-block:: python
+
+        from typing import Any, Optional
+
+        from pytablewriter import MarkdownTableWriter
+        from pytablewriter.style import Cell, Style
+
+
+        def style_filter(cell: Cell, **kwargs: Any) -> Optional[Style]:
+            if cell.is_header_row():
+                return None
+
+            if cell.col == 0:
+                return Style(font_weight="bold")
+
+            value = int(cell.value)
+
+            if value > 80:
+                return Style(fg_color="red", font_weight="bold", decoration_line="underline")
+            elif value > 50:
+                return Style(fg_color="yellow", font_weight="bold")
+            elif value > 20:
+                return Style(fg_color="green")
+
+            return Style(fg_color="lightblue")
+
+
+        writer = MarkdownTableWriter(
+            table_name="style filter example",
+            headers=["Key", "Value 1", "Value 2"],
+            value_matrix=[
+                ["A", 95, 40],
+                ["B", 55, 5],
+                ["C", 30, 85],
+                ["D", 0, 69],
+            ],
+            flavor="github",
+            enable_ansi_escape=False,
+        )
+        writer.add_style_filter(style_filter)
+        writer.write_table()
+
+Rendered results can be found `here `__
+
+Theme
+~~~~~~~
+A theme consists of a set of style filters.
+The following command installs externally predefined themes:
+
+::
+
+    pip install pytablewriter[theme]
+
+Themes can be set via the constructor of the writer classes or the ``set_theme`` method.
+The following is an example of setting the ``altrow`` theme via the constructor;
+the ``altrow`` theme colors rows alternately:
+
+:Sample Code:
+    .. code-block:: python
+
+        import pytablewriter as ptw
+
+        writer = ptw.TableWriterFactory.create_from_format_name(
+            "markdown",
+            headers=["INT", "STR"],
+            value_matrix=[[1, "hoge"], [2, "foo"], [3, "bar"]],
+            margin=1,
+            theme="altrow",
+        )
+        writer.write_table()
+
+:Output:
+    .. figure:: https://cdn.jsdelivr.net/gh/thombashi/pytablewriter-altrow-theme@master/ss/ptw-altrow-theme_example_default.png
+        :scale: 100%
+        :alt: https://github.com/thombashi/pytablewriter-altrow-theme/blob/master/ss/ptw-altrow-theme_example_default.png
+
+The ``[theme]`` extra includes the following themes:
+
+- `pytablewriter-altrow-theme `__
+    - `Generated HTML table example `__
+- `pytablewriter-altcol-theme `__
+    - `Generated HTML table example `__
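+
+The ``set_theme`` method mentioned above applies a theme after construction; a minimal sketch (assumed equivalent to passing ``theme="altrow"`` to the constructor, as in the sample above):
+
+.. code-block:: python
+
+    import pytablewriter as ptw
+
+    writer = ptw.MarkdownTableWriter(
+        headers=["INT", "STR"],
+        value_matrix=[[1, "hoge"], [2, "foo"], [3, "bar"]],
+        margin=1,
+    )
+    # apply the altrow theme via the method instead of the constructor
+    writer.set_theme("altrow")
+    writer.write_table()
+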
+Make tables for specific applications
+---------------------------------------
+Render a table in Jupyter Notebook
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+All table writer class instances in ``pytablewriter`` can render in Jupyter Notebook.
+To render writers in notebook cells, install the dependency packages by either:
+
+- ``pip install pytablewriter[html]`` or
+- ``pip install pytablewriter[all]``
+
+Jupyter Notebook code examples can be found `here `__:
+
+.. figure:: https://cdn.jsdelivr.net/gh/thombashi/pytablewriter@master/ss/jupyter_notebook.png
+    :scale: 100%
+    :alt: https://github.com/thombashi/pytablewriter/blob/master/ss/jupyter_notebook.png
+
+    Table rendering results of Jupyter Notebook
+
+Multibyte character support
+-----------------------------
+Write a table using multibyte characters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+You can use multibyte characters as table data.
+Multibyte characters are also properly padded and aligned.
+
+:Sample Code:
+    .. code-block:: python
+
+        import pytablewriter as ptw
+
+
+        def main():
+            writer = ptw.RstSimpleTableWriter(
+                table_name="生成に関するパターン",
+                headers=["パターン名", "概要", "GoF", "Code Complete[1]"],
+                value_matrix=[
+                    ["Abstract Factory", "関連する一連のインスタンスを状況に応じて、適切に生成する方法を提供する。", "Yes", "Yes"],
+                    ["Builder", "複合化されたインスタンスの生成過程を隠蔽する。", "Yes", "No"],
+                    ["Factory Method", "実際に生成されるインスタンスに依存しない、インスタンスの生成方法を提供する。", "Yes", "Yes"],
+                    ["Prototype", "同様のインスタンスを生成するために、原型のインスタンスを複製する。", "Yes", "No"],
+                    ["Singleton", "あるクラスについて、インスタンスが単一であることを保証する。", "Yes", "Yes"],
+                ],
+            )
+            writer.write_table()
+
+
+        if __name__ == "__main__":
+            main()
+
+:Output:
+    .. figure:: https://cdn.jsdelivr.net/gh/thombashi/pytablewriter@master/docs/pages/examples/multibyte/ss/multi_byte_char.png
+        :scale: 100%
+        :alt: https://github.com/thombashi/pytablewriter/blob/master/docs/pages/examples/multibyte/ss/multi_byte_char.png
+
+        Output of multi-byte character table
+
+Multiprocessing
+-----------------
+You can increase the number of workers that process the table data via the ``max_workers`` attribute of a writer.
+Increasing ``max_workers`` reduces processing time when the tabular data is large and the execution environment has cores to spare.
+
+If you set ``max_workers`` to more than one, guard the program entry point as follows to avoid the problems caused by multiprocessing:
+
+.. code-block:: python
+
+    from multiprocessing import cpu_count
+    import pytablewriter as ptw
+
+    def main():
+        writer = ptw.MarkdownTableWriter()
+        writer.max_workers = cpu_count()
+        ...
+
+    if __name__ == "__main__":
+        main()
+
+For more information
+----------------------
+More examples are available at
+https://pytablewriter.rtfd.io/en/latest/pages/examples/index.html
+
+Dependencies
+============
+- Python 3.7+
+- `Python package dependencies (automatically installed) `__
+
+
+Optional dependencies
+---------------------
+- ``logging`` extras
+    - `loguru `__: used for logging if the package is installed
+- ``from`` extras
+    - `pytablereader `__
+- ``es`` extra
+    - `elasticsearch `__
+- ``excel`` extras
+    - `xlwt `__
+    - `XlsxWriter `__
+- ``html`` extras
+    - `dominate `__
+- ``sqlite`` extras
+    - `SimpleSQLite `__
+- ``theme`` extras
+    - `pytablewriter-altrow-theme `__
+    - `pytablewriter-altcol-theme `__
+- ``toml`` extras
+    - `toml `__
+
+Documentation
+===============
+https://pytablewriter.rtfd.io/
+
+Projects using pytablewriter
+==================================
+- `pytest-md-report `__
+
+
+Related Projects
+==================================
+- `pytablereader `__
+    - Tabular data loaded by ``pytablereader`` can be written to another tabular data format with ``pytablewriter``.
+
+Sponsors
+====================================
+.. image:: https://avatars.githubusercontent.com/u/44389260?s=48&u=6da7176e51ae2654bcfd22564772ef8a3bb22318&v=4
+    :target: https://github.com/chasbecker
+    :alt: Charles Becker (chasbecker)
+.. image:: https://avatars.githubusercontent.com/u/46711571?s=48&u=57687c0e02d5d6e8eeaf9177f7b7af4c9f275eb5&v=4
+    :target: https://github.com/Arturi0
+    :alt: onetime: Arturi0
+.. 
image:: https://avatars.githubusercontent.com/u/3658062?s=48&v=4 + :target: https://github.com/b4tman + :alt: onetime: Dmitry Belyaev (b4tman) + +`Become a sponsor `__ + diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..fe5ba01796a258832cdfd58e244dea7070f0c515 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/RECORD @@ -0,0 +1,139 @@ +pytablewriter-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytablewriter-1.2.0.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084 +pytablewriter-1.2.0.dist-info/METADATA,sha256=0Wnu9isWPIZJlWz2FbvlAtx2yycl-4II7DxUMD9yO_o,37921 +pytablewriter-1.2.0.dist-info/RECORD,, +pytablewriter-1.2.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +pytablewriter-1.2.0.dist-info/top_level.txt,sha256=4qovxzrpT62Feu8LLdPGtIqYBswTr4QcU4mRmpM61-k,14 +pytablewriter/__init__.py,sha256=E2Y4TxopUWgqMateYeM22S6pGZct8qa_S78a1J_x9ao,2942 +pytablewriter/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/__pycache__/__version__.cpython-310.pyc,, +pytablewriter/__pycache__/_converter.cpython-310.pyc,, +pytablewriter/__pycache__/_factory.cpython-310.pyc,, +pytablewriter/__pycache__/_function.cpython-310.pyc,, +pytablewriter/__pycache__/_table_format.cpython-310.pyc,, +pytablewriter/__pycache__/_typing.cpython-310.pyc,, +pytablewriter/__pycache__/error.cpython-310.pyc,, +pytablewriter/__version__.py,sha256=jMpcYYHOmAVqxHupt-XeoKSCb2KyHxAKYdLvxCET3VU,201 +pytablewriter/_converter.py,sha256=iPlzCNzbGPJ4eSfgMz7DwD7GjaV0n1zxBm_iIzbvG7E,238 +pytablewriter/_factory.py,sha256=jd12k0fPgy7YwdXjO26T4MK-XxEOLHZylUaUEcX4HH4,10839 +pytablewriter/_function.py,sha256=rBDD1Uka9k7R4adjUf2syCAipN4me7ymNJXpAGoO7kk,2402 +pytablewriter/_logger/__init__.py,sha256=DzORajZGSzcVR5wMlNgQ2b54Pr1CBgaN3OycGTp9s7g,107 +pytablewriter/_logger/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/_logger/__pycache__/_logger.cpython-310.pyc,, +pytablewriter/_logger/__pycache__/_null_logger.cpython-310.pyc,, +pytablewriter/_logger/_logger.py,sha256=-kcFift5s8FXFqB4ajALK7Dpnkyc9aWRGV6JrXZhpgI,3287 +pytablewriter/_logger/_null_logger.py,sha256=QJuaErUIV_x6NjQ9qNX9eNSi_GB_9CrO7lKeXYZnuaw,1088 +pytablewriter/_table_format.py,sha256=CowmtamVcQYT4zmvGbw6vIexTBadtSigoDmw9_FamlM,9446 +pytablewriter/_typing.py,sha256=HRJjzKYxa8rxk0DOurr5LPTs57flr7aQKiKjBtkR4is,109604 +pytablewriter/error.py,sha256=MwPbc4EtUklc7X3eVWADVCA6rrzelfsBcH16E0pJQeE,787 +pytablewriter/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pytablewriter/sanitizer/__init__.py,sha256=Ob9cbVV0DBI6W6fupmMIHEgoSCdaGeyxo_VhfvNizEM,703 +pytablewriter/sanitizer/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_base.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_elasticsearch.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_excel.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_interface.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_javascript.cpython-310.pyc,, +pytablewriter/sanitizer/__pycache__/_python.cpython-310.pyc,, +pytablewriter/sanitizer/_base.py,sha256=njQCXCWzVmvgZuGH5qDKVNqMSAKj6hHBla1S36_g4qo,2898 +pytablewriter/sanitizer/_elasticsearch.py,sha256=yTUCMks3ghGMrClpvE5jEplOi-8tZ0cJx0vObi1eOBM,733 +pytablewriter/sanitizer/_excel.py,sha256=6zWWt6Umtt7kCCO7HPYeEqFQkj5OlkOJqMlJdPiMLFY,2458 
+pytablewriter/sanitizer/_interface.py,sha256=mH2SpdHYgvpENEfLmGZhQnfSs-1DD86PhT6g69fI5kE,913 +pytablewriter/sanitizer/_javascript.py,sha256=UO2KzHncysO6pWYGiLstVWDMskVb5apzz2PfLsgzHrQ,3570 +pytablewriter/sanitizer/_python.py,sha256=RYEzmPuCx7D1y5mhjoTzoeHhIaVVVaHpWbvwTFkFyJw,3072 +pytablewriter/style/__init__.py,sha256=OmdQIAKEu8o5E9Xu9fN_kQ1SAtCZZPebFEY8QQjGFpQ,1107 +pytablewriter/style/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/style/__pycache__/_cell.cpython-310.pyc,, +pytablewriter/style/__pycache__/_font.cpython-310.pyc,, +pytablewriter/style/__pycache__/_style.cpython-310.pyc,, +pytablewriter/style/__pycache__/_styler.cpython-310.pyc,, +pytablewriter/style/__pycache__/_styler_interface.cpython-310.pyc,, +pytablewriter/style/__pycache__/_theme.cpython-310.pyc,, +pytablewriter/style/_cell.py,sha256=Ggaq9xm2r_oXUv_W2eV1YLZeI-U0AVsTpAJBfj1Dozw,549 +pytablewriter/style/_font.py,sha256=f3e9bKB83JYu7Yow7EYA_6XCJvqyCSMvjrIXC-Uelfc,341 +pytablewriter/style/_style.py,sha256=VRXE01qJRZk3VAPuIU9q2pxTJupz4qHwPOieD1elsvA,12523 +pytablewriter/style/_styler.py,sha256=yfPSPGCiaKQvrzDZxMp36oChhGS13N6Jv1l9hxzBQSs,9920 +pytablewriter/style/_styler_interface.py,sha256=rM1OX8rYIQsk9vtPmbrrcTlf4e0_So2XrHT3L4z1bF8,828 +pytablewriter/style/_theme.py,sha256=A6t0Q-SkQhrwCTvXUVBE9rt-h-M-2VmNavtsMynzTLY,2948 +pytablewriter/typehint/__init__.py,sha256=FDTB4uiJDm2b0A6IsYtTVO2Z994tb5o3kcXbwkDDKYQ,545 +pytablewriter/typehint/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/writer/__init__.py,sha256=r0ZSklAeSM84jA4xzvTFaXHVe0Il0GjAQ8vk2_mtplQ,1766 +pytablewriter/writer/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_common.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_elasticsearch.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_interface.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_msgfy.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_null.cpython-310.pyc,, +pytablewriter/writer/__pycache__/_table_writer.cpython-310.pyc,, +pytablewriter/writer/_common.py,sha256=BjKw-NvsyNQw9D8Zrpg8RyjLjgQjc0QiLbp1bQoGROE,221 +pytablewriter/writer/_elasticsearch.py,sha256=tgcXdlIp_pX8J117mW99TEvCAP3vaz0TnxJnfo6B9NI,6272 +pytablewriter/writer/_interface.py,sha256=Vg_5HlUcOca-PhyoqDuRxzvyQGcuqie1_f1U_lGZKL0,2647 +pytablewriter/writer/_msgfy.py,sha256=Qf3VIhuCfstgGOEaYQswrW9R1lgYFknjw33YZJGNyFo,1777 +pytablewriter/writer/_null.py,sha256=YPBm1lc23wCQbVHuYKPPOTtdlZKfZOBIEWpkuBKQEw4,1590 +pytablewriter/writer/_table_writer.py,sha256=LHibO5UY8StL0fzcnLuc_FkbSoIv3f6D8CEZMWbD0Yk,41943 +pytablewriter/writer/binary/__init__.py,sha256=akvPmDxtQjvKEac2yx9c-96CURTFx0809iPPskpa25c,281 +pytablewriter/writer/binary/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/writer/binary/__pycache__/_excel.cpython-310.pyc,, +pytablewriter/writer/binary/__pycache__/_excel_workbook.cpython-310.pyc,, +pytablewriter/writer/binary/__pycache__/_interface.cpython-310.pyc,, +pytablewriter/writer/binary/__pycache__/_pandas.cpython-310.pyc,, +pytablewriter/writer/binary/__pycache__/_sqlite.cpython-310.pyc,, +pytablewriter/writer/binary/_excel.py,sha256=66h62U0xbboSDkuMB2qO5xT_tXcqtf-18XCH-yqgjgI,15436 +pytablewriter/writer/binary/_excel_workbook.py,sha256=E6xvw1zvTsYBhih5FeStRu23Q1bSJMgQ093pvxdVllI,3936 +pytablewriter/writer/binary/_interface.py,sha256=U48pCiVMUgeYSKCINncSN5Sy9OnYQ90LMhC7Ls1C8O0,1487 +pytablewriter/writer/binary/_pandas.py,sha256=__TzeKz31To7Kh4v7o8JKwTXfz0kYeNo27e_Bcs38LA,2633 +pytablewriter/writer/binary/_sqlite.py,sha256=ZnXqvidGUri1SM-Cxls1NwgVg9riDaPkFnr9iQjGchQ,2982 
+pytablewriter/writer/text/__init__.py,sha256=_rk5sczp6H9sag4PXgKIbxSTrgW8HktmlJqN0cXR01M,1384 +pytablewriter/writer/text/__pycache__/__init__.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_asciidoc.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_borderless.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_common.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_css.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_csv.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_html.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_interface.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_json.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_jsonlines.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_latex.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_ltsv.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_markdown.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_mediawiki.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_rst.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_spacealigned.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_text_writer.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_toml.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_tsv.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_unicode.cpython-310.pyc,, +pytablewriter/writer/text/__pycache__/_yaml.cpython-310.pyc,, +pytablewriter/writer/text/_asciidoc.py,sha256=T7PQ2qpN68K0GgeORTUCmCy0uY7iq3xwpCU_7vxSrms,4362 +pytablewriter/writer/text/_borderless.py,sha256=4RhWiSppkS2bRIl8osmqkSst-hwDzaAT-GaSyHyHft4,1010 +pytablewriter/writer/text/_common.py,sha256=1YRanAyjyEgo9muaUM3n9pPieKsX0d5Y-_ktI92B_tA,554 +pytablewriter/writer/text/_css.py,sha256=-271SLbV9wYm2YLqUf64HBrkoL2iBX1wxSUeyzyJOk4,5488 +pytablewriter/writer/text/_csv.py,sha256=zfL8yUspvp98JYyqEi_OtB0Xp7wYp11CXQSVX3lhuiY,1490 +pytablewriter/writer/text/_html.py,sha256=4zn3eXdvKS7s_gJbSlkQhb-dJoQjxf_pq9pms1XHkzw,6327 +pytablewriter/writer/text/_interface.py,sha256=Qcwjq6w_dz5Lk7Txr42ESnomW0316-LqPBo1HmcRP7I,642 +pytablewriter/writer/text/_json.py,sha256=djJKShELQfISL_S2XUNvl67ToFST08tw4E7WyGurvRs,5073 +pytablewriter/writer/text/_jsonlines.py,sha256=5tQqMzsJQumpvIykCxiLNmjWF5PM875ci_TMIKOEjSM,1282 +pytablewriter/writer/text/_latex.py,sha256=GacA95fAjdFtAPrfmsOtfw6UG0yU4rqUWOL2KxZ2XHM,6322 +pytablewriter/writer/text/_ltsv.py,sha256=xsMAMMU2F5UdznagXnQJbz62-nstSiSbjm7vgHlLm_s,1517 +pytablewriter/writer/text/_markdown.py,sha256=r_HATSDEYnAP57hoWoTImoNHFLh99c5IMEVNqwMOnnc,6193 +pytablewriter/writer/text/_mediawiki.py,sha256=dwlBbkKQGgpvt2bZVy12AVjaWKRZyP9Q1Kzomyh1cTg,3271 +pytablewriter/writer/text/_rst.py,sha256=MrJdVHxOvHRAwd0YoUXvLfGtfswKo0IRSS1fjTrVJUw,6912 +pytablewriter/writer/text/_spacealigned.py,sha256=osMTS0cvNl8qWthlUkB6noAaKGlBUL02MW-YEvMXEgA,897 +pytablewriter/writer/text/_text_writer.py,sha256=YTqd-SWS72coC9Y2nfkDqlpu44wDWiS49kBAX2AQ9JM,20542 +pytablewriter/writer/text/_toml.py,sha256=oUQRIiNIXQ47ccGasVohbDGBksMMxDETv0mnbCngVC8,2265 +pytablewriter/writer/text/_tsv.py,sha256=xLXiOznMZ8W8fKa-xfZCNlTl2Q4_HWFTUQlR4__DjuU,467 +pytablewriter/writer/text/_unicode.py,sha256=-2W2O-FaBkPDAJuwBKLEutGS09y7DcasK4Q83K0GXiE,3532 +pytablewriter/writer/text/_yaml.py,sha256=WfvH-hdBsWUt8JerzuBQ1xqJN88fLs-GMfcuJeU2QPs,1980 +pytablewriter/writer/text/sourcecode/__init__.py,sha256=25ju5UpRUV7DBNsusSj4YLzOLY5akmmEW7gKegSVtu4,297 +pytablewriter/writer/text/sourcecode/__pycache__/__init__.cpython-310.pyc,, 
+pytablewriter/writer/text/sourcecode/__pycache__/_javascript.cpython-310.pyc,, +pytablewriter/writer/text/sourcecode/__pycache__/_numpy.cpython-310.pyc,, +pytablewriter/writer/text/sourcecode/__pycache__/_pandas.cpython-310.pyc,, +pytablewriter/writer/text/sourcecode/__pycache__/_python.cpython-310.pyc,, +pytablewriter/writer/text/sourcecode/__pycache__/_sourcecode.cpython-310.pyc,, +pytablewriter/writer/text/sourcecode/_javascript.py,sha256=uTMT1sRuUoq-pbvE50Tjoi1j3Q6ywNFO5jK3mJljNxw,4710 +pytablewriter/writer/text/sourcecode/_numpy.py,sha256=RwPtBXuAzbc2AoA530IpJr0enpOUe0gEaUR936yRuNs,1970 +pytablewriter/writer/text/sourcecode/_pandas.py,sha256=I1RuFWVFExE1c3QX3RbdVN57B5oojXqNMwnOF0rXl_A,2556 +pytablewriter/writer/text/sourcecode/_python.py,sha256=O5ii7UOGef_u92cBL673pG_888_wLS-s6TnLDyor7V8,2541 +pytablewriter/writer/text/sourcecode/_sourcecode.py,sha256=EGKbj3qvj83LrnkgNjEUt0uzRYDXPWKJSmaXjhCaWAo,2245 diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ee171b9ce88fa902be1b06281de4ce7ade6e10f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytablewriter-1.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pytablewriter diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..52b44b20a37c4dd392a655d250cba7c8399c9a8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2015 David Cramer + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..803cdf6b3d5f4bcaec34bf01a51c382728b57bee --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/METADATA @@ -0,0 +1,1007 @@ +Metadata-Version: 2.1 +Name: responses +Version: 0.18.0 +Summary: A utility library for mocking out the `requests` Python library. +Home-page: https://github.com/getsentry/responses +Author: David Cramer +License: Apache 2.0 +Platform: UNKNOWN +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Software Development +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: requests (<3.0,>=2.0) +Requires-Dist: urllib3 (>=1.25.10) +Provides-Extra: tests +Requires-Dist: pytest (>=4.6) ; extra == 'tests' +Requires-Dist: coverage (>=6.0.0) ; extra == 'tests' +Requires-Dist: pytest-cov ; extra == 'tests' +Requires-Dist: pytest-localserver ; extra == 'tests' +Requires-Dist: flake8 ; extra == 'tests' +Requires-Dist: types-mock ; extra == 'tests' +Requires-Dist: types-requests ; extra == 'tests' +Requires-Dist: mypy ; extra == 'tests' + +Responses +========= + +.. image:: https://img.shields.io/pypi/v/responses.svg + :target: https://pypi.python.org/pypi/responses/ + +.. image:: https://img.shields.io/pypi/pyversions/responses.svg + :target: https://pypi.org/project/responses/ + +.. image:: https://codecov.io/gh/getsentry/responses/branch/master/graph/badge.svg + :target: https://codecov.io/gh/getsentry/responses/ + +A utility library for mocking out the ``requests`` Python library. + +.. note:: + + Responses requires Python 3.7 or newer, and requests >= 2.0 + + +Table of Contents +----------------- + +.. contents:: + + +Installing +---------- + +``pip install responses`` + + +Basics +------ + +The core of ``responses`` comes from registering mock responses: + +.. code-block:: python + + import responses + import requests + + @responses.activate + def test_simple(): + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + json={'error': 'not found'}, status=404) + + resp = requests.get('http://twitter.com/api/1/foobar') + + assert resp.json() == {"error": "not found"} + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' + assert responses.calls[0].response.text == '{"error": "not found"}' + +If you attempt to fetch a URL which doesn't match a registered response, ``responses`` will raise a ``ConnectionError``: + +.. code-block:: python + + import pytest + import responses + import requests + + from requests.exceptions import ConnectionError + + @responses.activate + def test_simple(): + with pytest.raises(ConnectionError): + requests.get('http://twitter.com/api/1/foobar') + +Lastly, you can pass an ``Exception`` as the body to trigger an error on the request: + +.. 
code-block:: python + + import pytest + import responses + import requests + + @responses.activate + def test_simple(): + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + body=Exception('...')) + with pytest.raises(Exception): + requests.get('http://twitter.com/api/1/foobar') + + +Response Parameters +------------------- + +Responses are automatically registered via params on ``add``, but can also be +passed directly: + +.. code-block:: python + + import responses + + responses.add( + responses.Response( + method='GET', + url='http://example.com', + ) + ) + +The following attributes can be passed to a Response mock: + +method (``str``) + The HTTP method (GET, POST, etc). + +url (``str`` or compiled regular expression) + The full resource URL. + +match_querystring (``bool``) + DEPRECATED: Use ``responses.matchers.query_param_matcher`` or + ``responses.matchers.query_string_matcher`` + + Include the query string when matching requests. + Enabled by default if the response URL contains a query string, + disabled if it doesn't or the URL is a regular expression. + +body (``str`` or ``BufferedReader``) + The response body. + +json + A Python object representing the JSON response body. Automatically configures + the appropriate Content-Type. + +status (``int``) + The HTTP status code. + +content_type (``str``) + Defaults to ``text/plain``. + +headers (``dict``) + Response headers. + +stream (``bool``) + DEPRECATED: use the ``stream`` argument in the request directly + +auto_calculate_content_length (``bool``) + Disabled by default. Automatically calculates the length of a supplied string or JSON body. + +match (``list``) + A list of callbacks to match requests based on request attributes. + The current module provides multiple matchers that you can use to match: + + * body contents in JSON format + * body contents in URL encoded data format + * request query parameters + * request query string (similar to query parameters but takes string as input) + * kwargs provided to request e.g. ``stream``, ``verify`` + * 'multipart/form-data' content and headers in request + * request headers + * request fragment identifier + + Alternatively, you can create a custom matcher. + Read more in `Matching Requests`_ + + +Matching Requests +----------------- + +Matching Request Body Contents +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When adding responses for endpoints that receive request data, you can add +matchers to ensure your code is sending the right parameters and provide +different responses based on the request body contents. ``responses`` provides +matchers for JSON and URL-encoded request bodies. + +URL-encoded data +"""""""""""""""" + +.. code-block:: python + + import responses + import requests + from responses import matchers + + @responses.activate + def test_calc_api(): + responses.add( + responses.POST, + url='http://calc.com/sum', + body="4", + match=[ + matchers.urlencoded_params_matcher({"left": "1", "right": "3"}) + ] + ) + requests.post("http://calc.com/sum", data={"left": 1, "right": 3}) + + +JSON encoded data +""""""""""""""""" + +Matching JSON encoded data can be done with ``matchers.json_params_matcher()``. + +.. 
code-block:: python + + import responses + import requests + from responses import matchers + + @responses.activate + def test_calc_api(): + responses.add( + method=responses.POST, + url="http://example.com/", + body="one", + match=[matchers.json_params_matcher({"page": {"name": "first", "type": "json"}})], + ) + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "application/json"}, + json={"page": {"name": "first", "type": "json"}}, + ) + + +Query Parameters Matcher +^^^^^^^^^^^^^^^^^^^^^^^^ + +Query Parameters as a Dictionary +"""""""""""""""""""""""""""""""" + +You can use the ``matchers.query_param_matcher`` function to match +against the ``params`` request parameter. Just use the same dictionary that you +would pass as the ``params`` argument to ``requests``. + +Note: do not include the query parameters in the URL itself, and avoid the +deprecated ``match_querystring`` argument. + +.. code-block:: python + + import responses + import requests + from responses import matchers + + @responses.activate + def test_calc_api(): + url = "http://example.com/test" + params = {"hello": "world", "I am": "a big test"} + responses.add( + method=responses.GET, + url=url, + body="test", + match=[matchers.query_param_matcher(params)], + match_querystring=False, + ) + + resp = requests.get(url, params=params) + + constructed_url = r"http://example.com/test?I+am=a+big+test&hello=world" + assert resp.url == constructed_url + assert resp.request.url == constructed_url + assert resp.request.params == params + + +Query Parameters as a String +"""""""""""""""""""""""""""" + +As an alternative, you can pass a query string value to ``matchers.query_string_matcher`` to match +the query parameters of your request: + +.. code-block:: python + + import requests + import responses + from responses import matchers + + @responses.activate + def my_func(): + responses.add( + responses.GET, + "https://httpbin.org/get", + match=[matchers.query_string_matcher("didi=pro&test=1")], + ) + resp = requests.get("https://httpbin.org/get", params={"test": 1, "didi": "pro"}) + + my_func() + + +Request Keyword Arguments Matcher +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To validate request arguments, use the ``matchers.request_kwargs_matcher`` function to match +against the request kwargs. + +Note: only arguments provided to ``matchers.request_kwargs_matcher`` will be validated. + +.. code-block:: python + + import responses + import requests + from responses import matchers + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_kwargs = { + "stream": True, + "verify": False, + } + rsps.add( + "GET", + "http://111.com", + match=[matchers.request_kwargs_matcher(req_kwargs)], + ) + + requests.get("http://111.com", stream=True) + + # >>> Arguments don't match: {stream: True, verify: True} doesn't match {stream: True, verify: False} + + +Request multipart/form-data Data Validation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To validate request body and headers for ``multipart/form-data`` data you can use +``matchers.multipart_matcher``. The ``data`` and ``files`` parameters provided will be compared +to the request: + +.. 
code-block:: python + + import requests + import responses + from responses.matchers import multipart_matcher + + @responses.activate + def my_func(): + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + responses.add( + responses.POST, url="http://httpbin.org/post", + match=[multipart_matcher(req_files, data=req_data)] + ) + resp = requests.post("http://httpbin.org/post", files={"file_name": b"New World!"}) + + my_func() + # >>> raises ConnectionError: multipart/form-data doesn't match. Request body differs. + +Request Fragment Identifier Validation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To validate a request's URL fragment identifier you can use ``matchers.fragment_identifier_matcher``. +The matcher takes the fragment string (everything after the ``#`` sign) as input for comparison: + +.. code-block:: python + + import requests + import responses + from responses.matchers import fragment_identifier_matcher + + @responses.activate + def run(): + url = "http://example.com?ab=xy&zed=qwe#test=1&foo=bar" + responses.add( + responses.GET, + url, + match_querystring=True, + match=[fragment_identifier_matcher("test=1&foo=bar")], + body=b"test", + ) + + # two requests to check reversed order of fragment identifier + resp = requests.get("http://example.com?ab=xy&zed=qwe#test=1&foo=bar") + resp = requests.get("http://example.com?zed=qwe&ab=xy#foo=bar&test=1") + + run() + +Request Headers Validation +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When adding responses you can specify matchers to ensure that your code is +sending the right headers and provide different responses based on the request +headers. + +.. code-block:: python + + import responses + import requests + from responses import matchers + + + @responses.activate + def test_content_type(): + responses.add( + responses.GET, + url="http://example.com/", + body="hello world", + match=[ + matchers.header_matcher({"Accept": "text/plain"}) + ] + ) + + responses.add( + responses.GET, + url="http://example.com/", + json={"content": "hello world"}, + match=[ + matchers.header_matcher({"Accept": "application/json"}) + ] + ) + + # requests are made in reverse order to how the mocks were added! + resp = requests.get("http://example.com/", headers={"Accept": "application/json"}) + assert resp.json() == {"content": "hello world"} + + resp = requests.get("http://example.com/", headers={"Accept": "text/plain"}) + assert resp.text == "hello world" + +Because ``requests`` will send several standard headers in addition to what was +specified by your code, request headers that are additional to the ones +passed to the matcher are ignored by default. You can change this behaviour by +passing ``strict_match=True`` to the matcher to ensure that only the headers +that you're expecting are sent and no others. Note that you will probably have +to use a ``PreparedRequest`` in your code to ensure that ``requests`` doesn't +include any additional headers. + +.. 
code-block:: python + + import pytest + import responses + import requests + + from requests.exceptions import ConnectionError + from responses import matchers + + @responses.activate + def test_content_type(): + responses.add( + responses.GET, + url="http://example.com/", + body="hello world", + match=[ + matchers.header_matcher({"Accept": "text/plain"}, strict_match=True) + ] + ) + + # this will fail because requests adds its own headers + with pytest.raises(ConnectionError): + requests.get("http://example.com/", headers={"Accept": "text/plain"}) + + # a prepared request where you overwrite the headers before sending will work + session = requests.Session() + prepped = session.prepare_request( + requests.Request( + method="GET", + url="http://example.com/", + ) + ) + prepped.headers = {"Accept": "text/plain"} + + resp = session.send(prepped) + assert resp.text == "hello world" + + +Creating Custom Matcher +^^^^^^^^^^^^^^^^^^^^^^^ + +If your application requires other encodings or different data validation, you can build +your own matcher that returns ``Tuple[matches: bool, reason: str]``, where the +boolean indicates whether the request parameters match and the string gives the +reason for a match failure. Your matcher can +expect a ``PreparedRequest`` parameter to be provided by ``responses``. + +Note: ``PreparedRequest`` is customized and has the additional attributes ``params`` and ``req_kwargs``. + +Response Registry +----------------- + +By default, ``responses`` will search all registered ``Response`` objects and +return a match. If only one ``Response`` is registered, the registry is kept unchanged. +However, if multiple matches are found for the same request, then the first match is returned and +removed from the registry. + +Such behavior is suitable for most use cases, but to handle special conditions you can +implement a custom registry, which must follow the interface of ``registries.FirstMatchRegistry``. +Redefining the ``find`` method allows you to implement custom search logic and return the +appropriate ``Response``. + +The following example shows how to set a custom registry: + +.. code-block:: python + + import responses + from responses import registries + + + class CustomRegistry(registries.FirstMatchRegistry): + pass + + + """ Before tests: """ + + # using function decorator + @responses.activate(registry=CustomRegistry) + def run(): + """ Within test: <__main__.CustomRegistry object> """ + + run() + """ After test: """ + + # using context manager + with responses.RequestsMock(registry=CustomRegistry) as rsps: + """ In context manager: <__main__.CustomRegistry object> """ + + """ + After exit from context manager: + """ + +Dynamic Responses +----------------- + +You can utilize callbacks to provide dynamic responses. The callback must return +a tuple of (``status``, ``headers``, ``body``).
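+ +A minimal sketch of this contract (the endpoint, test name, and response body below are illustrative, not taken from the library's documentation): + +.. code-block:: python + + import responses + import requests + + @responses.activate + def test_minimal_callback(): + # the callback receives the PreparedRequest and returns a + # (status, headers, body) tuple + def request_callback(request): + return (200, {}, "pong") + + responses.add_callback( + responses.GET, 'http://calc.com/ping', + callback=request_callback, + ) + assert requests.get('http://calc.com/ping').text == "pong" + + test_minimal_callback() + +A fuller example that computes the response body from the request payload: + +.. 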
code-block:: python + + import json + + import responses + import requests + + @responses.activate + def test_calc_api(): + + def request_callback(request): + payload = json.loads(request.body) + resp_body = {'value': sum(payload['numbers'])} + headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} + return (200, headers, json.dumps(resp_body)) + + responses.add_callback( + responses.POST, 'http://calc.com/sum', + callback=request_callback, + content_type='application/json', + ) + + resp = requests.post( + 'http://calc.com/sum', + json.dumps({'numbers': [1, 2, 3]}), + headers={'content-type': 'application/json'}, + ) + + assert resp.json() == {'value': 6} + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://calc.com/sum' + assert responses.calls[0].response.text == '{"value": 6}' + assert ( + responses.calls[0].response.headers['request-id'] == + '728d329e-0e86-11e4-a748-0c84dc037c13' + ) + +You can also pass a compiled regex to ``add_callback`` to match multiple urls: + +.. code-block:: python + + import re, json + + from functools import reduce + + import responses + import requests + + operators = { + 'sum': lambda x, y: x+y, + 'prod': lambda x, y: x*y, + 'pow': lambda x, y: x**y + } + + @responses.activate + def test_regex_url(): + + def request_callback(request): + payload = json.loads(request.body) + operator_name = request.path_url[1:] + + operator = operators[operator_name] + + resp_body = {'value': reduce(operator, payload['numbers'])} + headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} + return (200, headers, json.dumps(resp_body)) + + responses.add_callback( + responses.POST, + re.compile('http://calc.com/(sum|prod|pow|unsupported)'), + callback=request_callback, + content_type='application/json', + ) + + resp = requests.post( + 'http://calc.com/prod', + json.dumps({'numbers': [2, 3, 4]}), + headers={'content-type': 'application/json'}, + ) + assert resp.json() == {'value': 24} + + test_regex_url() + + +If you want to pass extra keyword arguments to the callback function, for example when reusing +a callback function to give a slightly different result, you can use ``functools.partial``: + +.. code-block:: python + + from functools import partial + + ... + + def request_callback(request, id=None): + payload = json.loads(request.body) + resp_body = {'value': sum(payload['numbers'])} + headers = {'request-id': id} + return (200, headers, json.dumps(resp_body)) + + responses.add_callback( + responses.POST, 'http://calc.com/sum', + callback=partial(request_callback, id='728d329e-0e86-11e4-a748-0c84dc037c13'), + content_type='application/json', + ) + + +You can see params passed in the original ``request`` in ``responses.calls[].request.params``: + +.. code-block:: python + + import responses + import requests + + @responses.activate + def test_request_params(): + responses.add( + method=responses.GET, + url="http://example.com?hello=world", + body="test", + match_querystring=False, + ) + + resp = requests.get('http://example.com', params={"hello": "world"}) + assert responses.calls[0].request.params == {"hello": "world"} + +Responses as a context manager +------------------------------ + +.. 
code-block:: python + + import responses + import requests + + def test_my_api(): + with responses.RequestsMock() as rsps: + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + resp = requests.get('http://twitter.com/api/1/foobar') + + assert resp.status_code == 200 + + # outside the context manager requests will hit the remote server + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 404 + +Responses as a pytest fixture +----------------------------- + +.. code-block:: python + + import pytest + import responses + import requests + + @pytest.fixture + def mocked_responses(): + with responses.RequestsMock() as rsps: + yield rsps + + def test_api(mocked_responses): + mocked_responses.add( + responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 200 + +Responses inside a unittest setUp() +----------------------------------- + +When run with unittest tests, this can be used to set up some +generic class-level responses that may be complemented by each test: + +.. code-block:: python + + import unittest + import responses + import requests + from responses import matchers + + class TestMyApi(unittest.TestCase): + def setUp(self): + responses.add(responses.GET, 'https://example.com', body="within setup") + # other responses.add(...) calls can go here + + @responses.activate + def test_my_func(self): + responses.add( + responses.GET, + "https://httpbin.org/get", + match=[matchers.query_param_matcher({"test": "1", "didi": "pro"})], + body="within test" + ) + resp = requests.get("https://example.com") + resp2 = requests.get("https://httpbin.org/get", params={"test": "1", "didi": "pro"}) + print(resp.text) + # >>> within setup + print(resp2.text) + # >>> within test + + +Assertions on declared responses +-------------------------------- + +When used as a context manager, Responses will, by default, raise an assertion +error if a URL was registered but not accessed. This can be disabled by passing +the ``assert_all_requests_are_fired`` value: + +.. code-block:: python + + import responses + import requests + + def test_my_api(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + +assert_call_count +----------------- + +Assert that the request was called exactly n times. + +.. code-block:: python + + import pytest + import responses + import requests + + @responses.activate + def test_assert_call_count(): + responses.add(responses.GET, "http://example.com") + + requests.get("http://example.com") + assert responses.assert_call_count("http://example.com", 1) is True + + requests.get("http://example.com") + with pytest.raises(AssertionError) as excinfo: + responses.assert_call_count("http://example.com", 1) + assert "Expected URL 'http://example.com' to be called 1 times. Called 2 times." in str(excinfo.value) + + +Multiple Responses +------------------ + +You can also add multiple responses for the same URL: + +.. 
code-block:: python + + import responses + import requests + + @responses.activate + def test_my_api(): + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500) + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 500 + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 200 + + +Using a callback to modify the response +--------------------------------------- + +If you use customized processing in ``requests`` via subclassing/mixins, or if you +have library tools that interact with ``requests`` at a low level, you may need +to add extended processing to the mocked Response object to fully simulate the +environment for your tests. A ``response_callback`` can be used, which will be +wrapped by the library before being returned to the caller. The callback +accepts a ``response`` as its single argument, and is expected to return a +single ``response`` object. + +.. code-block:: python + + import responses + import requests + + def response_callback(resp): + resp.callback_processed = True + return resp + + with responses.RequestsMock(response_callback=response_callback) as m: + m.add(responses.GET, 'http://example.com', body=b'test') + resp = requests.get('http://example.com') + assert resp.text == "test" + assert hasattr(resp, 'callback_processed') + assert resp.callback_processed is True + + +Passing through real requests +----------------------------- + +In some cases you may wish to allow for certain requests to pass through responses +and hit a real server. This can be done with the ``add_passthru`` methods: + +.. code-block:: python + + import responses + + @responses.activate + def test_my_api(): + responses.add_passthru('https://percy.io') + +This will allow any requests matching that prefix that are otherwise not +registered as mock responses to pass through using the standard behavior. + +Pass-through endpoints can be configured with regex patterns if you +need to allow an entire domain or path subtree to send requests: + +.. code-block:: python + + responses.add_passthru(re.compile('https://percy.io/\\w+')) + + +Lastly, you can use the ``response.passthrough`` attribute on ``BaseResponse`` or +use ``PassthroughResponse`` to enable a response to behave as a pass-through. + +.. code-block:: python + + import responses + from responses import Response, PassthroughResponse + + # Enable passthrough for a single response + response = Response(responses.GET, 'http://example.com', body='not used') + response.passthrough = True + responses.add(response) + + # Use PassthroughResponse + response = PassthroughResponse(responses.GET, 'http://example.com') + responses.add(response) + +Viewing/Modifying registered responses +-------------------------------------- + +Registered responses are available as a public method of the ``RequestsMock`` +instance. It is sometimes useful for debugging purposes to view the stack of +registered responses, which can be accessed via ``responses.registered()``; a +short sketch follows below. + +The ``replace`` function allows a previously registered ``response`` to be +changed. The method signature is identical to ``add``. Responses are +identified using ``method`` and ``url``; only the first matched ``response`` is +replaced.
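+ +A minimal sketch of inspecting the registry via ``responses.registered()``, as mentioned above (the test name and printed output are illustrative): + +.. code-block:: python + + import responses + + @responses.activate + def test_inspect_registry(): + responses.add(responses.GET, 'http://example.org', json={'data': 1}) + # print the method and URL of every registered response + for registered in responses.registered(): + print(registered.method, registered.url) + + test_inspect_registry() + +And the ``replace`` function in action: + +.. 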
code-block:: python + + import responses + import requests + + @responses.activate + def test_replace(): + + responses.add(responses.GET, 'http://example.org', json={'data': 1}) + responses.replace(responses.GET, 'http://example.org', json={'data': 2}) + + resp = requests.get('http://example.org') + + assert resp.json() == {'data': 2} + + +The ``upsert`` function allows a previously registered ``response`` to be +changed like ``replace``. If the response is not registered, the ``upsert`` function +will register it like ``add``. + +``remove`` takes a ``method`` and ``url`` argument and will remove **all** +matched responses from the registered list. + +Finally, ``reset`` will reset all registered responses. + +Contributing +------------ + +Environment Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Responses uses several linting and autoformatting utilities, so it's important that when +submitting patches you use the appropriate toolchain: + +Clone the repository: + +.. code-block:: shell + + git clone https://github.com/getsentry/responses.git + +Create an environment (e.g. with ``virtualenv``): + +.. code-block:: shell + + virtualenv .env && source .env/bin/activate + +Install development requirements: + +.. code-block:: shell + + make develop + + +Tests and Code Quality Validation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The easiest way to validate your code is to run tests via ``tox``. +The current ``tox`` configuration runs the same checks that are used in the +GitHub Actions CI/CD pipeline. + +Please execute the following command line from the project root to validate +your code against: + +* Unit tests in all Python versions that are supported by this project +* Type validation via ``mypy`` +* All ``pre-commit`` hooks + +.. code-block:: shell + + tox + +Alternatively, you can always run a single test. See the documentation below. + +Unit tests +"""""""""" + +Responses uses `pytest <https://docs.pytest.org/>`_ for +testing. You can run all tests by: + +.. code-block:: shell + + tox -e py37 + tox -e py310 + +OR manually activate the required version of Python and run: + +.. code-block:: shell + + pytest + +And run a single test by: + +.. code-block:: shell + + pytest -k '<test_name>' + +Type Validation +""""""""""""""" + +To verify type compliance, run the `mypy <https://mypy-lang.org/>`_ linter: + +.. code-block:: shell + + tox -e mypy + +OR + +.. code-block:: shell + + mypy --config-file=./mypy.ini -p responses + +Code Quality and Style +"""""""""""""""""""""" + +To check code style and reformat it, run: + +.. code-block:: shell + + tox -e precom + +OR + +.. 
code-block:: shell + + pre-commit run --all-files + +Note: on some operating systems, you may have to use ``pre_commit`` instead. + + diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..5863bdbfb00ad3f99e932a1e9ace6382be94c3bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/RECORD @@ -0,0 +1,21 @@ +responses-0.18.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +responses-0.18.0.dist-info/LICENSE,sha256=SJ7LcLREfANKEJeKSwjaAVyb2fqVyjrq8hnZgVQWpnw,10835 +responses-0.18.0.dist-info/METADATA,sha256=tDP8L448eeDFehL9hduJn6ii57_r-DM2iJCGxKH62dI,29524 +responses-0.18.0.dist-info/RECORD,, +responses-0.18.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +responses-0.18.0.dist-info/top_level.txt,sha256=aQhzfC0bq4TkAaB_Yr-7cv4u2Xnc8WiVzvh4KdZo0Qo,10 +responses/__init__.py,sha256=NM8i_dk9oOQ-8rIl3NSMElIY6wrFF1h0sCgVGdhYdhw,25878 +responses/__init__.pyi,sha256=dL53mDCDkctID8Cj5OYnsiXvee8Cq09ktZQcBNJo9Hs,10487 +responses/__pycache__/__init__.cpython-310.pyc,, +responses/__pycache__/matchers.cpython-310.pyc,, +responses/__pycache__/registries.cpython-310.pyc,, +responses/__pycache__/test_matchers.cpython-310.pyc,, +responses/__pycache__/test_registries.cpython-310.pyc,, +responses/__pycache__/test_responses.cpython-310.pyc,, +responses/matchers.py,sha256=Fy_7DZUEu9bKcx5CXlYQiV_7ruTlQwhya0M0z0xyIGg,10177 +responses/matchers.pyi,sha256=dW74cbEyWEg8HAPAY_tt61H1wLGq4e3OumZi5SawVwg,946 +responses/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +responses/registries.py,sha256=w4C6BriaYFzrERHLwuV1arDs78TnLrpZOIOLIuZz3Js,2073 +responses/test_matchers.py,sha256=nAUGvfEGusnKS93-TlX6xmgqg2jruC5H87nGFlJg__U,19757 +responses/test_registries.py,sha256=Nm8YUN-Kk8nqUcOgcItLH_iJnkN6PrI9qxxudM_RqEI,1903 +responses/test_responses.py,sha256=rRvidExibOF-tKU0jTMkxNwx1shZ5JVj_OHXP-MU31w,57403 diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..2cb24f43db3bb3264bbf9191db620a3a6a449405 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses-0.18.0.dist-info/top_level.txt @@ -0,0 +1 @@ +responses