Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py +115 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py +149 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py +55 -0
- env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py +77 -0
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc
ADDED
Binary file (88.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc
ADDED
Binary file (12.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc
ADDED
Binary file (3.91 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc
ADDED
Binary file (1.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (201 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc
ADDED
Binary file (92.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is auto-generated by `utils/generate_inference_types.py`.
|
2 |
+
# Do not modify it manually.
|
3 |
+
#
|
4 |
+
# ruff: noqa: F401
|
5 |
+
|
6 |
+
from .audio_classification import (
|
7 |
+
AudioClassificationInput,
|
8 |
+
AudioClassificationOutputElement,
|
9 |
+
AudioClassificationParameters,
|
10 |
+
)
|
11 |
+
from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement
|
12 |
+
from .automatic_speech_recognition import (
|
13 |
+
AutomaticSpeechRecognitionGenerationParameters,
|
14 |
+
AutomaticSpeechRecognitionInput,
|
15 |
+
AutomaticSpeechRecognitionOutput,
|
16 |
+
AutomaticSpeechRecognitionOutputChunk,
|
17 |
+
AutomaticSpeechRecognitionParameters,
|
18 |
+
)
|
19 |
+
from .base import BaseInferenceType
|
20 |
+
from .chat_completion import (
|
21 |
+
ChatCompletionInput,
|
22 |
+
ChatCompletionInputMessage,
|
23 |
+
ChatCompletionOutput,
|
24 |
+
ChatCompletionOutputChoice,
|
25 |
+
ChatCompletionOutputChoiceMessage,
|
26 |
+
ChatCompletionStreamOutput,
|
27 |
+
ChatCompletionStreamOutputChoice,
|
28 |
+
ChatCompletionStreamOutputDelta,
|
29 |
+
)
|
30 |
+
from .depth_estimation import DepthEstimationInput, DepthEstimationOutput
|
31 |
+
from .document_question_answering import (
|
32 |
+
DocumentQuestionAnsweringInput,
|
33 |
+
DocumentQuestionAnsweringInputData,
|
34 |
+
DocumentQuestionAnsweringOutputElement,
|
35 |
+
DocumentQuestionAnsweringParameters,
|
36 |
+
)
|
37 |
+
from .feature_extraction import FeatureExtractionInput
|
38 |
+
from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters
|
39 |
+
from .image_classification import (
|
40 |
+
ImageClassificationInput,
|
41 |
+
ImageClassificationOutputElement,
|
42 |
+
ImageClassificationParameters,
|
43 |
+
)
|
44 |
+
from .image_segmentation import ImageSegmentationInput, ImageSegmentationOutputElement, ImageSegmentationParameters
|
45 |
+
from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize
|
46 |
+
from .image_to_text import ImageToTextGenerationParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters
|
47 |
+
from .object_detection import (
|
48 |
+
ObjectDetectionBoundingBox,
|
49 |
+
ObjectDetectionInput,
|
50 |
+
ObjectDetectionOutputElement,
|
51 |
+
ObjectDetectionParameters,
|
52 |
+
)
|
53 |
+
from .question_answering import (
|
54 |
+
QuestionAnsweringInput,
|
55 |
+
QuestionAnsweringInputData,
|
56 |
+
QuestionAnsweringOutputElement,
|
57 |
+
QuestionAnsweringParameters,
|
58 |
+
)
|
59 |
+
from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData
|
60 |
+
from .summarization import SummarizationGenerationParameters, SummarizationInput, SummarizationOutput
|
61 |
+
from .table_question_answering import (
|
62 |
+
TableQuestionAnsweringInput,
|
63 |
+
TableQuestionAnsweringInputData,
|
64 |
+
TableQuestionAnsweringOutputElement,
|
65 |
+
)
|
66 |
+
from .text2text_generation import Text2TextGenerationInput, Text2TextGenerationOutput, Text2TextGenerationParameters
|
67 |
+
from .text_classification import TextClassificationInput, TextClassificationOutputElement, TextClassificationParameters
|
68 |
+
from .text_generation import (
|
69 |
+
TextGenerationInput,
|
70 |
+
TextGenerationOutput,
|
71 |
+
TextGenerationOutputDetails,
|
72 |
+
TextGenerationOutputSequenceDetails,
|
73 |
+
TextGenerationOutputToken,
|
74 |
+
TextGenerationParameters,
|
75 |
+
TextGenerationPrefillToken,
|
76 |
+
TextGenerationStreamDetails,
|
77 |
+
TextGenerationStreamOutput,
|
78 |
+
)
|
79 |
+
from .text_to_audio import TextToAudioGenerationParameters, TextToAudioInput, TextToAudioOutput, TextToAudioParameters
|
80 |
+
from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters, TextToImageTargetSize
|
81 |
+
from .token_classification import (
|
82 |
+
TokenClassificationInput,
|
83 |
+
TokenClassificationOutputElement,
|
84 |
+
TokenClassificationParameters,
|
85 |
+
)
|
86 |
+
from .translation import TranslationGenerationParameters, TranslationInput, TranslationOutput
|
87 |
+
from .video_classification import (
|
88 |
+
VideoClassificationInput,
|
89 |
+
VideoClassificationOutputElement,
|
90 |
+
VideoClassificationParameters,
|
91 |
+
)
|
92 |
+
from .visual_question_answering import (
|
93 |
+
VisualQuestionAnsweringInput,
|
94 |
+
VisualQuestionAnsweringInputData,
|
95 |
+
VisualQuestionAnsweringOutputElement,
|
96 |
+
VisualQuestionAnsweringParameters,
|
97 |
+
)
|
98 |
+
from .zero_shot_classification import (
|
99 |
+
ZeroShotClassificationInput,
|
100 |
+
ZeroShotClassificationInputData,
|
101 |
+
ZeroShotClassificationOutputElement,
|
102 |
+
ZeroShotClassificationParameters,
|
103 |
+
)
|
104 |
+
from .zero_shot_image_classification import (
|
105 |
+
ZeroShotImageClassificationInput,
|
106 |
+
ZeroShotImageClassificationInputData,
|
107 |
+
ZeroShotImageClassificationOutputElement,
|
108 |
+
ZeroShotImageClassificationParameters,
|
109 |
+
)
|
110 |
+
from .zero_shot_object_detection import (
|
111 |
+
ZeroShotObjectDetectionBoundingBox,
|
112 |
+
ZeroShotObjectDetectionInput,
|
113 |
+
ZeroShotObjectDetectionInputData,
|
114 |
+
ZeroShotObjectDetectionOutputElement,
|
115 |
+
)
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.19 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc
ADDED
Binary file (1.43 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc
ADDED
Binary file (963 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc
ADDED
Binary file (2.75 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc
ADDED
Binary file (4.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc
ADDED
Binary file (2.81 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc
ADDED
Binary file (994 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc
ADDED
Binary file (2.08 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc
ADDED
Binary file (716 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc
ADDED
Binary file (1.42 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-310.pyc
ADDED
Binary file (1.44 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc
ADDED
Binary file (1.61 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc
ADDED
Binary file (1.67 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-310.pyc
ADDED
Binary file (2.38 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc
ADDED
Binary file (1.67 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc
ADDED
Binary file (1.87 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc
ADDED
Binary file (975 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-310.pyc
ADDED
Binary file (1.59 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc
ADDED
Binary file (1.43 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-310.pyc
ADDED
Binary file (1.64 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc
ADDED
Binary file (1.42 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc
ADDED
Binary file (4.07 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-310.pyc
ADDED
Binary file (2.36 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc
ADDED
Binary file (1.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc
ADDED
Binary file (1.69 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc
ADDED
Binary file (1.58 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-310.pyc
ADDED
Binary file (1.53 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc
ADDED
Binary file (1.66 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-310.pyc
ADDED
Binary file (1.69 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-310.pyc
ADDED
Binary file (1.71 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc
ADDED
Binary file (1.75 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
|
13 |
+
class AudioToAudioInput(BaseInferenceType):
|
14 |
+
"""Inputs for Audio to Audio inference"""
|
15 |
+
|
16 |
+
inputs: Any
|
17 |
+
"""The input audio data"""
|
18 |
+
|
19 |
+
|
20 |
+
@dataclass
|
21 |
+
class AudioToAudioOutputElement(BaseInferenceType):
|
22 |
+
"""Outputs of inference for the Audio To Audio task
|
23 |
+
A generated audio file with its label.
|
24 |
+
"""
|
25 |
+
|
26 |
+
blob: Any
|
27 |
+
"""The generated audio file."""
|
28 |
+
content_type: str
|
29 |
+
"""The content type of audio file."""
|
30 |
+
label: str
|
31 |
+
"""The label of the audio file."""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py
ADDED
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
"""Contains a base class for all inference types."""
|
15 |
+
|
16 |
+
import inspect
|
17 |
+
import json
|
18 |
+
import warnings
|
19 |
+
from dataclasses import asdict, dataclass
|
20 |
+
from typing import Any, Dict, List, Type, TypeVar, Union, get_args
|
21 |
+
|
22 |
+
|
23 |
+
T = TypeVar("T", bound="BaseInferenceType")
|
24 |
+
|
25 |
+
|
26 |
+
@dataclass
|
27 |
+
class BaseInferenceType(dict):
|
28 |
+
"""Base class for all inference types.
|
29 |
+
|
30 |
+
Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future.
|
31 |
+
|
32 |
+
Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields
|
33 |
+
are made optional, and non-expected fields are added as dict attributes).
|
34 |
+
"""
|
35 |
+
|
36 |
+
@classmethod
|
37 |
+
def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
|
38 |
+
"""Alias to parse server response and return a single instance.
|
39 |
+
|
40 |
+
See `parse_obj` for more details.
|
41 |
+
"""
|
42 |
+
output = cls.parse_obj(data)
|
43 |
+
if not isinstance(output, list):
|
44 |
+
raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")
|
45 |
+
return output
|
46 |
+
|
47 |
+
@classmethod
|
48 |
+
def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
|
49 |
+
"""Alias to parse server response and return a single instance.
|
50 |
+
|
51 |
+
See `parse_obj` for more details.
|
52 |
+
"""
|
53 |
+
output = cls.parse_obj(data)
|
54 |
+
if isinstance(output, list):
|
55 |
+
raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")
|
56 |
+
return output
|
57 |
+
|
58 |
+
@classmethod
|
59 |
+
def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
|
60 |
+
"""Parse server response as a dataclass or list of dataclasses.
|
61 |
+
|
62 |
+
To enable future-compatibility, we want to handle cases where the server return more fields than expected.
|
63 |
+
In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are
|
64 |
+
added as dict attributes.
|
65 |
+
"""
|
66 |
+
# Parse server response (from bytes)
|
67 |
+
if isinstance(data, bytes):
|
68 |
+
data = data.decode()
|
69 |
+
if isinstance(data, str):
|
70 |
+
data = json.loads(data)
|
71 |
+
|
72 |
+
# If a list, parse each item individually
|
73 |
+
if isinstance(data, List):
|
74 |
+
return [cls.parse_obj(d) for d in data] # type: ignore [misc]
|
75 |
+
|
76 |
+
# At this point, we expect a dict
|
77 |
+
if not isinstance(data, dict):
|
78 |
+
raise ValueError(f"Invalid data type: {type(data)}")
|
79 |
+
|
80 |
+
init_values = {}
|
81 |
+
other_values = {}
|
82 |
+
for key, value in data.items():
|
83 |
+
key = normalize_key(key)
|
84 |
+
if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:
|
85 |
+
if isinstance(value, dict) or isinstance(value, list):
|
86 |
+
field_type = cls.__dataclass_fields__[key].type
|
87 |
+
|
88 |
+
# if `field_type` is a `BaseInferenceType`, parse it
|
89 |
+
if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):
|
90 |
+
value = field_type.parse_obj(value)
|
91 |
+
|
92 |
+
# otherwise, recursively parse nested dataclasses (if possible)
|
93 |
+
# `get_args` returns handle Union and Optional for us
|
94 |
+
else:
|
95 |
+
expected_types = get_args(field_type)
|
96 |
+
for expected_type in expected_types:
|
97 |
+
if getattr(expected_type, "_name", None) == "List":
|
98 |
+
expected_type = get_args(expected_type)[
|
99 |
+
0
|
100 |
+
] # assume same type for all items in the list
|
101 |
+
if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):
|
102 |
+
value = expected_type.parse_obj(value)
|
103 |
+
break
|
104 |
+
init_values[key] = value
|
105 |
+
else:
|
106 |
+
other_values[key] = value
|
107 |
+
|
108 |
+
# Make all missing fields default to None
|
109 |
+
# => ensure that dataclass initialization will never fail even if the server does not return all fields.
|
110 |
+
for key in cls.__dataclass_fields__:
|
111 |
+
if key not in init_values:
|
112 |
+
init_values[key] = None
|
113 |
+
|
114 |
+
# Initialize dataclass with expected values
|
115 |
+
item = cls(**init_values)
|
116 |
+
|
117 |
+
# Add remaining fields as dict attributes
|
118 |
+
item.update(other_values)
|
119 |
+
return item
|
120 |
+
|
121 |
+
def __post_init__(self):
|
122 |
+
self.update(asdict(self))
|
123 |
+
|
124 |
+
def __setitem__(self, __key: Any, __value: Any) -> None:
|
125 |
+
# Hacky way to keep dataclass values in sync when dict is updated
|
126 |
+
super().__setitem__(__key, __value)
|
127 |
+
if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:
|
128 |
+
self.__setattr__(__key, __value)
|
129 |
+
return
|
130 |
+
|
131 |
+
def __setattr__(self, __name: str, __value: Any) -> None:
|
132 |
+
# Hacky way to keep dict values is sync when dataclass is updated
|
133 |
+
super().__setattr__(__name, __value)
|
134 |
+
if self.get(__name) != __value:
|
135 |
+
self[__name] = __value
|
136 |
+
return
|
137 |
+
|
138 |
+
def __getitem__(self, __key: Any) -> Any:
|
139 |
+
warnings.warn(
|
140 |
+
f"Accessing '{self.__class__.__name__}' values through dict is deprecated and "
|
141 |
+
"will be removed from version '0.25'. Use dataclass attributes instead.",
|
142 |
+
FutureWarning,
|
143 |
+
)
|
144 |
+
return super().__getitem__(__key)
|
145 |
+
|
146 |
+
|
147 |
+
def normalize_key(key: str) -> str:
|
148 |
+
# e.g "content-type" -> "content_type", "Accept" -> "accept"
|
149 |
+
return key.replace("-", "_").replace(" ", "_").lower()
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Dict, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
|
13 |
+
class DepthEstimationInput(BaseInferenceType):
|
14 |
+
"""Inputs for Depth Estimation inference"""
|
15 |
+
|
16 |
+
inputs: Any
|
17 |
+
"""The input image data"""
|
18 |
+
parameters: Optional[Dict[str, Any]] = None
|
19 |
+
"""Additional inference parameters"""
|
20 |
+
|
21 |
+
|
22 |
+
@dataclass
|
23 |
+
class DepthEstimationOutput(BaseInferenceType):
|
24 |
+
"""Outputs of inference for the Depth Estimation task"""
|
25 |
+
|
26 |
+
depth: Any
|
27 |
+
"""The predicted depth as an image"""
|
28 |
+
predicted_depth: Any
|
29 |
+
"""The predicted depth as a tensor"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, List, Optional, Union
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
|
13 |
+
class DocumentQuestionAnsweringInputData(BaseInferenceType):
|
14 |
+
"""One (document, question) pair to answer"""
|
15 |
+
|
16 |
+
image: Any
|
17 |
+
"""The image on which the question is asked"""
|
18 |
+
question: str
|
19 |
+
"""A question to ask of the document"""
|
20 |
+
|
21 |
+
|
22 |
+
@dataclass
|
23 |
+
class DocumentQuestionAnsweringParameters(BaseInferenceType):
|
24 |
+
"""Additional inference parameters
|
25 |
+
Additional inference parameters for Document Question Answering
|
26 |
+
"""
|
27 |
+
|
28 |
+
doc_stride: Optional[int] = None
|
29 |
+
"""If the words in the document are too long to fit with the question for the model, it will
|
30 |
+
be split in several chunks with some overlap. This argument controls the size of that
|
31 |
+
overlap.
|
32 |
+
"""
|
33 |
+
handle_impossible_answer: Optional[bool] = None
|
34 |
+
"""Whether to accept impossible as an answer"""
|
35 |
+
lang: Optional[str] = None
|
36 |
+
"""Language to use while running OCR. Defaults to english."""
|
37 |
+
max_answer_len: Optional[int] = None
|
38 |
+
"""The maximum length of predicted answers (e.g., only answers with a shorter length are
|
39 |
+
considered).
|
40 |
+
"""
|
41 |
+
max_question_len: Optional[int] = None
|
42 |
+
"""The maximum length of the question after tokenization. It will be truncated if needed."""
|
43 |
+
max_seq_len: Optional[int] = None
|
44 |
+
"""The maximum length of the total sentence (context + question) in tokens of each chunk
|
45 |
+
passed to the model. The context will be split in several chunks (using doc_stride as
|
46 |
+
overlap) if needed.
|
47 |
+
"""
|
48 |
+
top_k: Optional[int] = None
|
49 |
+
"""The number of answers to return (will be chosen by order of likelihood). Can return less
|
50 |
+
than top_k answers if there are not enough options available within the context.
|
51 |
+
"""
|
52 |
+
word_boxes: Optional[List[Union[List[float], str]]] = None
|
53 |
+
"""A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
|
54 |
+
skip the OCR step and use the provided bounding boxes instead.
|
55 |
+
"""
|
56 |
+
|
57 |
+
|
58 |
+
@dataclass
class DocumentQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Document Question Answering inference"""

    # `inputs` is required; `parameters` may be omitted to use pipeline defaults.
    inputs: DocumentQuestionAnsweringInputData
    """One (document, question) pair to answer"""
    parameters: Optional[DocumentQuestionAnsweringParameters] = None
    """Additional inference parameters"""
|
66 |
+
|
67 |
+
|
68 |
+
@dataclass
class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Document Question Answering task"""

    # NOTE: `start`/`end` are word indices (per the docstrings below), not
    # character offsets — unlike the plain Question Answering task.
    answer: str
    """The answer to the question."""
    end: int
    """The end word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
    score: float
    """The probability associated to the answer."""
    start: int
    """The start word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
    words: List[int]
    """The index of each word/box pair that is in the answer"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Dict, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class FeatureExtractionInput(BaseInferenceType):
    """Inputs for Text Embedding inference"""

    inputs: str
    """The text to get the embeddings of"""
    # Free-form dict: this task has no dedicated parameters dataclass in the spec.
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Literal, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
# Post-processing applied to the model's raw scores.
ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass
class ImageClassificationParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Image Classification
    """

    # One of "sigmoid", "softmax" or "none" (see ClassificationOutputTransform);
    # None means "use the pipeline's default".
    function_to_apply: Optional["ClassificationOutputTransform"] = None
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
|
24 |
+
|
25 |
+
|
26 |
+
@dataclass
class ImageClassificationInput(BaseInferenceType):
    """Inputs for Image Classification inference"""

    # Typed Any by the codegen — presumably raw bytes or a base64/URL payload;
    # confirm against the client that builds this input.
    inputs: Any
    """The input image data"""
    parameters: Optional[ImageClassificationParameters] = None
    """Additional inference parameters"""
|
34 |
+
|
35 |
+
|
36 |
+
@dataclass
class ImageClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Classification task"""

    # One element per predicted class; the full output is a list of these.
    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Literal, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
# Which flavor of segmentation to run, when the model supports several.
ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]


@dataclass
class ImageSegmentationParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Image Segmentation
    """

    # All fields default to None, i.e. "use the underlying pipeline's default".
    mask_threshold: Optional[float] = None
    """Threshold to use when turning the predicted masks into binary values."""
    overlap_mask_area_threshold: Optional[float] = None
    """Mask overlap threshold to eliminate small, disconnected segments."""
    subtask: Optional["ImageSegmentationSubtask"] = None
    """Segmentation task to be performed, depending on model capabilities."""
    threshold: Optional[float] = None
    """Probability threshold to filter out predicted masks."""
|
29 |
+
|
30 |
+
|
31 |
+
@dataclass
class ImageSegmentationInput(BaseInferenceType):
    """Inputs for Image Segmentation inference"""

    # Typed Any by the codegen — presumably raw bytes or a base64/URL payload;
    # confirm against the client that builds this input.
    inputs: Any
    """The input image data"""
    parameters: Optional[ImageSegmentationParameters] = None
    """Additional inference parameters"""
|
39 |
+
|
40 |
+
|
41 |
+
@dataclass
class ImageSegmentationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Segmentation task

    A predicted mask / segment
    """

    label: str
    """The label of the predicted segment"""
    # Typed Any by the codegen — the spec describes it as an image payload.
    mask: Any
    """The corresponding mask as a black-and-white image"""
    # Optional: some subtasks (e.g. semantic) may not produce a per-segment score.
    score: Optional[float] = None
    """The score or confidence degree the model has"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, List, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class ImageToImageTargetSize(BaseInferenceType):
    """The size in pixel of the output image"""

    # Output height in pixels.
    height: int
    # Output width in pixels.
    width: int
|
18 |
+
|
19 |
+
|
20 |
+
@dataclass
class ImageToImageParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Image To Image
    """

    # All fields default to None, i.e. "use the underlying pipeline's default".
    guidance_scale: Optional[float] = None
    """For diffusion models. A higher guidance scale value encourages the model to generate
    images closely linked to the text prompt at the expense of lower image quality.
    """
    negative_prompt: Optional[List[str]] = None
    """One or several prompt to guide what NOT to include in image generation."""
    num_inference_steps: Optional[int] = None
    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
    a higher quality image at the expense of slower inference.
    """
    target_size: Optional[ImageToImageTargetSize] = None
    """The size in pixel of the output image"""
|
38 |
+
|
39 |
+
|
40 |
+
@dataclass
class ImageToImageInput(BaseInferenceType):
    """Inputs for Image To Image inference"""

    # Typed Any by the codegen — presumably raw bytes or a base64/URL payload;
    # confirm against the client that builds this input.
    inputs: Any
    """The input image data"""
    parameters: Optional[ImageToImageParameters] = None
    """Additional inference parameters"""
|
48 |
+
|
49 |
+
|
50 |
+
@dataclass
class ImageToImageOutput(BaseInferenceType):
    """Outputs of inference for the Image To Image task"""

    # Typed Any by the codegen — the spec describes it as an image payload.
    image: Any
    """The output image"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Literal, Optional, Union
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
# Sentinel string value accepted by `early_stopping` in addition to booleans.
EarlyStoppingEnum = Literal["never"]


@dataclass
class ImageToTextGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process

    Ad-hoc parametrization of the text generation process
    """

    # All fields default to None, i.e. "use the model/pipeline's default".
    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over maxLength."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over maxLength."""
    # NOTE(review): "maxLength" in the docstring above comes from the upstream spec;
    # it presumably should read "minLength" — confirm against the JSON schema
    # before changing (this file is generated, so fix it upstream).
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
|
75 |
+
|
76 |
+
|
77 |
+
@dataclass
class ImageToTextParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Image To Text
    """

    generate: Optional[ImageToTextGenerationParameters] = None
    """Parametrization of the text generation process"""
    # Convenience shortcut; `generate` also carries a max_new_tokens field —
    # precedence between the two is decided by the server, not here.
    max_new_tokens: Optional[int] = None
    """The amount of maximum tokens to generate."""
|
87 |
+
|
88 |
+
|
89 |
+
@dataclass
class ImageToTextInput(BaseInferenceType):
    """Inputs for Image To Text inference"""

    # Typed Any by the codegen — presumably raw bytes or a base64/URL payload;
    # confirm against the client that builds this input.
    inputs: Any
    """The input image data"""
    parameters: Optional[ImageToTextParameters] = None
    """Additional inference parameters"""
|
97 |
+
|
98 |
+
|
99 |
+
@dataclass
class ImageToTextOutput(BaseInferenceType):
    """Outputs of inference for the Image To Text task"""

    # Left untyped (Any) by the codegen; the typed variant below carries the
    # documented "generated text" — presumably a schema-generation artifact.
    generated_text: Any
    image_to_text_output_generated_text: Optional[str] = None
    """The generated text."""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Any, Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class ObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Object Detection
    """

    # None means "use the underlying pipeline's default".
    threshold: Optional[float] = None
    """The probability necessary to make a prediction."""
|
20 |
+
|
21 |
+
|
22 |
+
@dataclass
class ObjectDetectionInput(BaseInferenceType):
    """Inputs for Object Detection inference"""

    # Typed Any by the codegen — presumably raw bytes or a base64/URL payload;
    # confirm against the client that builds this input.
    inputs: Any
    """The input image data"""
    parameters: Optional[ObjectDetectionParameters] = None
    """Additional inference parameters"""
|
30 |
+
|
31 |
+
|
32 |
+
@dataclass
class ObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """

    # Integer coordinates, relative to the top-left corner of the input image
    # (per the class docstring). Fields are ordered alphabetically by the codegen.
    xmax: int
    xmin: int
    ymax: int
    ymin: int
|
42 |
+
|
43 |
+
|
44 |
+
@dataclass
class ObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Object Detection task"""

    # One element per detected object; the full output is a list of these.
    box: ObjectDetectionBoundingBox
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """
    label: str
    """The predicted label for the bounding box"""
    score: float
    """The associated score / probability"""
|
env-llmeval/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inference code generated from the JSON schema spec in @huggingface/tasks.
|
2 |
+
#
|
3 |
+
# See:
|
4 |
+
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
|
5 |
+
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from typing import Optional
|
8 |
+
|
9 |
+
from .base import BaseInferenceType
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class QuestionAnsweringInputData(BaseInferenceType):
    """One (context, question) pair to answer"""

    context: str
    """The context to be used for answering the question"""
    question: str
    """The question to be answered"""
|
20 |
+
|
21 |
+
|
22 |
+
@dataclass
class QuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters

    Additional inference parameters for Question Answering
    """

    # All fields default to None, i.e. "use the underlying pipeline's default".
    align_to_words: Optional[bool] = None
    """Attempts to align the answer to real words. Improves quality on space separated
    languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
    """
    doc_stride: Optional[int] = None
    """If the context is too long to fit with the question for the model, it will be split in
    several chunks with some overlap. This argument controls the size of that overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using docStride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Note that we
    return less than topk answers if there are not enough options available within the
    context.
    """
|
54 |
+
|
55 |
+
|
56 |
+
@dataclass
class QuestionAnsweringInput(BaseInferenceType):
    """Inputs for Question Answering inference"""

    # `inputs` is required; `parameters` may be omitted to use pipeline defaults.
    inputs: QuestionAnsweringInputData
    """One (context, question) pair to answer"""
    parameters: Optional[QuestionAnsweringParameters] = None
    """Additional inference parameters"""
|
64 |
+
|
65 |
+
|
66 |
+
@dataclass
class QuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Question Answering task"""

    # NOTE: here `start`/`end` are character positions in the input context —
    # unlike the Document QA task, whose indices refer to words/boxes.
    answer: str
    """The answer to the question."""
    end: int
    """The character position in the input where the answer ends."""
    score: float
    """The probability associated to the answer."""
    start: int
    """The character position in the input where the answer begins."""
|