input
stringlengths
33
5k
output
stringlengths
32
5k
import os
import sys

import pkg_resources
from setuptools import find_packages, setup


def read_version(fname="whisper/version.py"):
    """Return ``__version__`` from *fname* without importing the package.

    Executes the version module in an isolated namespace so the result does
    not depend on CPython's fragile exec-into-locals() behavior, and closes
    the file deterministically.
    """
    namespace = {}
    with open(fname, encoding="utf-8") as f:
        exec(compile(f.read(), fname, "exec"), namespace)
    return namespace["__version__"]


requirements = []
if sys.platform.startswith("linux"):
    triton_requirement = "triton>=2.0.0.dev20221202"
    try:
        import re
        import subprocess

        version_line = (
            subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
        )
        major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
        if (int(major), int(minor)) < (11, 4):
            # the last version supporting CUDA < 11.4
            triton_requirement = "triton==2.0.0.dev20221011"
    except (IndexError, OSError, subprocess.SubprocessError):
        # nvcc absent or its output unparsable: keep the default triton pin
        pass
    requirements.append(triton_requirement)

setup(
    name="openai-whisper",
    py_modules=["whisper"],
    version=read_version(),
    description="Robust Speech Recognition via Large-Scale Weak Supervision",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    # NOTE: the previous ``readme="README.md"`` argument was removed — it is
    # not a recognized setup() keyword (setuptools warns about unknown keys)
    # and is redundant with long_description above.
    python_requires=">=3.8",
    author="OpenAI",
    url="https://github.com/openai/whisper",
    license="MIT",
    packages=find_packages(exclude=["tests*"]),
    install_requires=requirements
    + [
        str(r)
        for r in pkg_resources.parse_requirements(
            open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
        )
    ],
    entry_points={
        "console_scripts": ["whisper=whisper.transcribe:cli"],
    },
    include_package_data=True,
    extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
import os
import sys

import pkg_resources
from setuptools import find_packages, setup


def read_version(fname="whisper/version.py"):
    """Return ``__version__`` from *fname* without importing the package.

    Executes the version module in an isolated namespace so the result does
    not depend on CPython's fragile exec-into-locals() behavior, and closes
    the file deterministically.
    """
    namespace = {}
    with open(fname, encoding="utf-8") as f:
        exec(compile(f.read(), fname, "exec"), namespace)
    return namespace["__version__"]


requirements = []
if sys.platform.startswith("linux"):
    triton_requirement = "triton>=2.0.0.dev20221202"
    try:
        import re
        import subprocess

        version_line = (
            subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
        )
        major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
        if (int(major), int(minor)) < (11, 4):
            # the last version supporting CUDA < 11.4
            triton_requirement = "triton==2.0.0.dev20221011"
    except (IndexError, OSError, subprocess.SubprocessError):
        # nvcc absent or its output unparsable: keep the default triton pin
        pass
    requirements.append(triton_requirement)

setup(
    name="openai-whisper",
    py_modules=["whisper"],
    version=read_version(),
    description="Robust Speech Recognition via Large-Scale Weak Supervision",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    # NOTE: the previous ``readme="README.md"`` argument was removed — it is
    # not a recognized setup() keyword (setuptools warns about unknown keys)
    # and is redundant with long_description above.
    python_requires=">=3.7",
    author="OpenAI",
    url="https://github.com/openai/whisper",
    license="MIT",
    packages=find_packages(exclude=["tests*"]),
    install_requires=requirements
    + [
        str(r)
        for r in pkg_resources.parse_requirements(
            open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
        )
    ],
    entry_points={
        "console_scripts": ["whisper=whisper.transcribe:cli"],
    },
    include_package_data=True,
    extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
from typing import List, Optional

import datasets

from ..folder_based_builder import folder_based_builder

logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    # Optional[bool] so the annotation matches the None default; None means
    # "infer automatically from the data files" (per the folder-based builder).
    drop_labels: Optional[bool] = None
    drop_metadata: Optional[bool] = None

    def __post_init__(self):
        super().__post_init__()


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based dataset builder that decodes audio files into an Audio feature."""

    BASE_FEATURE = datasets.Audio
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script


# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
from typing import List, Optional

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder

logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    # Optional[bool] so the annotation matches the None default; None means
    # "infer automatically from the data files" (per the folder-based builder).
    drop_labels: Optional[bool] = None
    drop_metadata: Optional[bool] = None

    def __post_init__(self):
        super().__post_init__()


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based dataset builder that decodes audio files into an Audio feature."""

    BASE_FEATURE = datasets.Audio
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
_base_ = [ './bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_' 'test-mot17halfval.py' ] # fp16 settings optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') val_cfg = dict(type='ValLoop', fp16=True) test_cfg = dict(type='TestLoop', fp16=True)
_base_ = [ './bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_' 'test-mot17halfval.py' ] # fp16 settings optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') test_cfg = dict(type='TestLoop', fp16=True)
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import HunyuanDiT2DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin

enable_full_determinism()


class HunyuanDiTTests(ModelTesterMixin, unittest.TestCase):
    """Common model tests for HunyuanDiT2DModel, using tiny tensor dimensions."""

    model_class = HunyuanDiT2DModel
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        # Tiny shapes keep this fast while still exercising every model input.
        batch_size = 2
        num_channels = 4
        height = width = 8
        embedding_dim = 8
        sequence_length = 4
        sequence_length_t5 = 4

        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        text_embedding_mask = torch.ones(size=(batch_size, sequence_length)).to(torch_device)
        encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length_t5, embedding_dim)).to(torch_device)
        text_embedding_mask_t5 = torch.ones(size=(batch_size, sequence_length_t5)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,), dtype=encoder_hidden_states.dtype).to(torch_device)

        # Micro-conditioning metadata: original size, target size, crop coords.
        original_size = [1024, 1024]
        target_size = [16, 16]
        crops_coords_top_left = [0, 0]
        add_time_ids = list(original_size + target_size + crops_coords_top_left)
        add_time_ids = torch.tensor([add_time_ids, add_time_ids], dtype=encoder_hidden_states.dtype).to(torch_device)
        style = torch.zeros(size=(batch_size,), dtype=int).to(torch_device)
        image_rotary_emb = [
            torch.ones(size=(1, 8), dtype=encoder_hidden_states.dtype),
            torch.zeros(size=(1, 8), dtype=encoder_hidden_states.dtype),
        ]

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "text_embedding_mask": text_embedding_mask,
            "encoder_hidden_states_t5": encoder_hidden_states_t5,
            "text_embedding_mask_t5": text_embedding_mask_t5,
            "timestep": timestep,
            "image_meta_size": add_time_ids,
            "style": style,
            "image_rotary_emb": image_rotary_emb,
        }

    @property
    def input_shape(self):
        return (4, 8, 8)

    @property
    def output_shape(self):
        return (8, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 8,
            "patch_size": 2,
            "in_channels": 4,
            "num_layers": 1,
            "attention_head_dim": 8,
            "num_attention_heads": 2,
            "cross_attention_dim": 8,
            "cross_attention_dim_t5": 8,
            "pooled_projection_dim": 4,
            "hidden_size": 16,
            "text_len": 4,
            "text_len_t5": 4,
            "activation_fn": "gelu-approximate",
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        # The model output keeps the batch dimension and appends output_shape.
        super().test_output(
            expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
        )

    @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
    def test_set_xformers_attn_processor_for_determinism(self):
        pass

    @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
    def test_set_attn_processor_for_determinism(self):
        pass
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import HunyuanDiT2DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin

enable_full_determinism()


class HunyuanDiTTests(ModelTesterMixin, unittest.TestCase):
    """Common model tests for HunyuanDiT2DModel, using tiny tensor dimensions."""

    model_class = HunyuanDiT2DModel
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        # Tiny shapes keep this fast while still exercising every model input.
        batch_size = 2
        num_channels = 4
        height = width = 8
        embedding_dim = 8
        sequence_length = 4
        sequence_length_t5 = 4

        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        text_embedding_mask = torch.ones(size=(batch_size, sequence_length)).to(torch_device)
        encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length_t5, embedding_dim)).to(torch_device)
        text_embedding_mask_t5 = torch.ones(size=(batch_size, sequence_length_t5)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,), dtype=encoder_hidden_states.dtype).to(torch_device)

        # Micro-conditioning metadata: original size, target size, crop coords.
        original_size = [1024, 1024]
        target_size = [16, 16]
        crops_coords_top_left = [0, 0]
        add_time_ids = list(original_size + target_size + crops_coords_top_left)
        add_time_ids = torch.tensor([add_time_ids, add_time_ids], dtype=encoder_hidden_states.dtype).to(torch_device)
        style = torch.zeros(size=(batch_size,), dtype=int).to(torch_device)
        image_rotary_emb = [
            torch.ones(size=(1, 8), dtype=encoder_hidden_states.dtype),
            torch.zeros(size=(1, 8), dtype=encoder_hidden_states.dtype),
        ]

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "text_embedding_mask": text_embedding_mask,
            "encoder_hidden_states_t5": encoder_hidden_states_t5,
            "text_embedding_mask_t5": text_embedding_mask_t5,
            "timestep": timestep,
            "image_meta_size": add_time_ids,
            "style": style,
            "image_rotary_emb": image_rotary_emb,
        }

    @property
    def input_shape(self):
        return (4, 8, 8)

    @property
    def output_shape(self):
        return (8, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 8,
            "patch_size": 2,
            "in_channels": 4,
            "num_layers": 1,
            "attention_head_dim": 8,
            "num_attention_heads": 2,
            "cross_attention_dim": 8,
            "cross_attention_dim_t5": 8,
            "pooled_projection_dim": 4,
            "hidden_size": 16,
            "text_len": 4,
            "text_len_t5": 4,
            "activation_fn": "gelu-approximate",
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        # The model output keeps the batch dimension and appends output_shape.
        super().test_output(
            expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
        )

    @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
    def test_set_xformers_attn_processor_for_determinism(self):
        pass

    @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
    def test_set_attn_processor_for_determinism(self):
        pass
from __future__ import annotations

import re
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from sentence_transformers.SentenceTransformer import SentenceTransformer


class SentenceEvaluator:
    """
    Base class for all evaluators

    Extend this class and implement __call__ for custom evaluators.
    """

    def __init__(self):
        """
        Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and
        ``primary_metric`` attributes. The former is a boolean indicating whether a higher evaluation score is
        better, which is used for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True``
        in the training arguments.

        The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
        the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
        metric, i.e. the one that is used for model selection and/or logging.
        """
        self.greater_is_better = True
        self.primary_metric = None

    def __call__(
        self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
    ) -> float | dict[str, float]:
        """
        This is called during training to evaluate the model.
        It returns a score for the evaluation with a higher score indicating a better result.

        Args:
            model: the model to evaluate
            output_path: path where predictions and metrics are written to
            epoch: the epoch where the evaluation takes place. This is used for the file prefixes.
                If this is -1, then we assume evaluation on test data.
            steps: the steps in the current epoch at time of the evaluation. This is used for the file prefixes.
                If this is -1, then we assume evaluation at the end of the epoch.

        Returns:
            Either a score for the evaluation with a higher score indicating a better result,
            or a dictionary with scores. If the latter is chosen, then `evaluator.primary_metric`
            must be defined
        """
        pass

    def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
        """Return ``metrics`` with every key prefixed by ``name + "_"`` (values coerced to float).

        Also keeps ``self.primary_metric`` in sync with the renamed keys.
        """
        if not name:
            return metrics
        metrics = {name + "_" + key: float(value) for key, value in metrics.items()}
        # Use getattr with a None default: primary_metric is initialized to None
        # in __init__, and calling .startswith on None would raise AttributeError.
        if getattr(self, "primary_metric", None) and not self.primary_metric.startswith(name + "_"):
            self.primary_metric = name + "_" + self.primary_metric
        return metrics

    def store_metrics_in_model_card_data(self, model: SentenceTransformer, metrics: dict[str, Any]) -> None:
        """Record the evaluation ``metrics`` on the model's model card data."""
        model.model_card_data.set_evaluation_metrics(self, metrics)

    @property
    def description(self) -> str:
        """
        Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification

        1. Remove "Evaluator" from the class name
        2. Add a space before every capital letter
        """
        class_name = self.__class__.__name__

        try:
            index = class_name.index("Evaluator")
            class_name = class_name[:index]
        except ValueError:
            # str.index raises ValueError (not IndexError) when the substring is
            # absent; leave the class name unchanged in that case.
            pass

        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
from __future__ import annotations

import re
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from sentence_transformers.SentenceTransformer import SentenceTransformer


class SentenceEvaluator:
    """
    Base class for all evaluators

    Extend this class and implement __call__ for custom evaluators.
    """

    def __init__(self):
        """
        Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and
        ``primary_metric`` attributes. The former is a boolean indicating whether a higher evaluation score is
        better, which is used for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True``
        in the training arguments.

        The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
        the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
        metric, i.e. the one that is used for model selection and/or logging.
        """
        self.greater_is_better = True
        self.primary_metric = None

    def __call__(
        self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
    ) -> float | dict[str, float]:
        """
        This is called during training to evaluate the model.
        It returns a score for the evaluation with a higher score indicating a better result.

        Args:
            model: the model to evaluate
            output_path: path where predictions and metrics are written to
            epoch: the epoch where the evaluation takes place. This is used for the file prefixes.
                If this is -1, then we assume evaluation on test data.
            steps: the steps in the current epoch at time of the evaluation. This is used for the file prefixes.
                If this is -1, then we assume evaluation at the end of the epoch.

        Returns:
            Either a score for the evaluation with a higher score indicating a better result,
            or a dictionary with scores. If the latter is chosen, then `evaluator.primary_metric`
            must be defined
        """
        pass

    def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
        """Return ``metrics`` with every key prefixed by ``name + "_"``.

        Also keeps ``self.primary_metric`` in sync with the renamed keys.
        """
        if not name:
            return metrics
        metrics = {name + "_" + key: value for key, value in metrics.items()}
        # Use getattr with a None default: primary_metric is initialized to None
        # in __init__, and calling .startswith on None would raise AttributeError.
        if getattr(self, "primary_metric", None) and not self.primary_metric.startswith(name + "_"):
            self.primary_metric = name + "_" + self.primary_metric
        return metrics

    def store_metrics_in_model_card_data(self, model: SentenceTransformer, metrics: dict[str, Any]) -> None:
        """Record the evaluation ``metrics`` on the model's model card data."""
        model.model_card_data.set_evaluation_metrics(self, metrics)

    @property
    def description(self) -> str:
        """
        Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification

        1. Remove "Evaluator" from the class name
        2. Add a space before every capital letter
        """
        class_name = self.__class__.__name__

        try:
            index = class_name.index("Evaluator")
            class_name = class_name[:index]
        except ValueError:
            # str.index raises ValueError (not IndexError) when the substring is
            # absent; leave the class name unchanged in that case.
            pass

        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
from base64 import b64encode
from typing import Optional
from urllib.parse import urlencode

from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import requests

from .base import BaseOAuthHandler


class NotionOAuthHandler(BaseOAuthHandler):
    """
    OAuth2 handler for Notion.

    Based on the documentation at https://developers.notion.com/docs/authorization

    Notes:
    - Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
    - Notion doesn't use scopes
    """

    PROVIDER_NAME = ProviderName.NOTION

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
        self.token_url = "https://api.notion.com/v1/oauth/token"

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        # scopes and code_challenge are accepted for interface compatibility
        # but unused: Notion has neither scopes nor PKCE.
        query = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
            "owner": "user",
            "state": state,
        }
        return f"{self.auth_base_url}?{urlencode(query)}"

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        payload = {
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": self.redirect_uri,
        }
        # Notion requires HTTP Basic auth with the client id/secret pair.
        basic_auth = b64encode(
            f"{self.client_id}:{self.client_secret}".encode()
        ).decode()
        headers = {
            "Authorization": f"Basic {basic_auth}",
            "Accept": "application/json",
        }
        response = requests.post(self.token_url, json=payload, headers=headers)
        token_data = response.json()

        # Email is only available for non-bot users
        owner = token_data["owner"]
        email = None
        if "person" in owner and "email" in owner["person"]:
            email = owner["person"]["email"]

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=token_data.get("workspace_name"),
            username=email,
            access_token=token_data["access_token"],
            refresh_token=None,
            access_token_expires_at=None,  # Notion tokens don't expire
            refresh_token_expires_at=None,
            scopes=[],
            metadata={
                "owner": owner,
                "bot_id": token_data["bot_id"],
                "workspace_id": token_data["workspace_id"],
                "workspace_name": token_data.get("workspace_name"),
                "workspace_icon": token_data.get("workspace_icon"),
            },
        )

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        # Notion doesn't support token revocation
        return False

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        # Notion doesn't support token refresh
        return credentials

    def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
        # Notion access tokens don't expire
        return False
from base64 import b64encode
from urllib.parse import urlencode

from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import requests

from .base import BaseOAuthHandler


class NotionOAuthHandler(BaseOAuthHandler):
    """
    OAuth2 handler for Notion.

    Based on the documentation at https://developers.notion.com/docs/authorization

    Notes:
    - Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
    - Notion doesn't use scopes
    """

    PROVIDER_NAME = ProviderName.NOTION

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
        self.token_url = "https://api.notion.com/v1/oauth/token"

    def get_login_url(self, scopes: list[str], state: str) -> str:
        # scopes is accepted for interface compatibility but unused:
        # Notion has no scope concept.
        query = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
            "owner": "user",
            "state": state,
        }
        return f"{self.auth_base_url}?{urlencode(query)}"

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str]
    ) -> OAuth2Credentials:
        payload = {
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": self.redirect_uri,
        }
        # Notion requires HTTP Basic auth with the client id/secret pair.
        basic_auth = b64encode(
            f"{self.client_id}:{self.client_secret}".encode()
        ).decode()
        headers = {
            "Authorization": f"Basic {basic_auth}",
            "Accept": "application/json",
        }
        response = requests.post(self.token_url, json=payload, headers=headers)
        token_data = response.json()

        # Email is only available for non-bot users
        owner = token_data["owner"]
        email = None
        if "person" in owner and "email" in owner["person"]:
            email = owner["person"]["email"]

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=token_data.get("workspace_name"),
            username=email,
            access_token=token_data["access_token"],
            refresh_token=None,
            access_token_expires_at=None,  # Notion tokens don't expire
            refresh_token_expires_at=None,
            scopes=[],
            metadata={
                "owner": owner,
                "bot_id": token_data["bot_id"],
                "workspace_id": token_data["workspace_id"],
                "workspace_name": token_data.get("workspace_name"),
                "workspace_icon": token_data.get("workspace_icon"),
            },
        )

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        # Notion doesn't support token revocation
        return False

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        # Notion doesn't support token refresh
        return credentials

    def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
        # Notion access tokens don't expire
        return False
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock

from mmengine.hooks import Hook


class TestHook:
    """Smoke-tests for the base ``Hook``: every callback must run as a no-op,
    and the scheduling helpers must report the right intervals/boundaries."""

    def test_before_run(self):
        hook = Hook()
        runner = Mock()
        hook.before_run(runner)

    def test_after_run(self):
        hook = Hook()
        runner = Mock()
        hook.after_run(runner)

    def test_before_epoch(self):
        hook = Hook()
        runner = Mock()
        hook._before_epoch(runner)

    def test_after_epoch(self):
        hook = Hook()
        runner = Mock()
        hook._after_epoch(runner)

    def test_before_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook._before_iter(runner, data_batch)

    def test_after_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook._after_iter(runner, data_batch, outputs)

    def test_before_save_checkpoint(self):
        hook = Hook()
        runner = Mock()
        checkpoint = {}
        hook.before_save_checkpoint(runner, checkpoint)

    def test_after_load_checkpoint(self):
        hook = Hook()
        runner = Mock()
        checkpoint = {}
        hook.after_load_checkpoint(runner, checkpoint)

    def test_before_train_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_train_epoch(runner)

    def test_before_val_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_val_epoch(runner)

    def test_before_test_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_test_epoch(runner)

    def test_after_train_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_train_epoch(runner)

    def test_after_val_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_val_epoch(runner)

    def test_after_test_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_test_epoch(runner)

    def test_before_train_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_train_iter(runner, data_batch)

    def test_before_val_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_val_iter(runner, data_batch)

    def test_before_test_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_test_iter(runner, data_batch)

    def test_after_train_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_train_iter(runner, data_batch, outputs)

    def test_after_val_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_val_iter(runner, data_batch, outputs)

    def test_after_test_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_test_iter(runner, data_batch, outputs)

    def test_every_n_epochs(self):
        hook = Hook()
        runner = Mock()
        # Interval helpers are 1-based: true only on every 3rd epoch.
        for idx in range(100):
            runner.epoch = idx
            result = hook.every_n_epochs(runner, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_every_n_inner_iters(self):
        hook = Hook()
        for idx in range(100):
            result = hook.every_n_inner_iters(idx, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_every_n_train_iters(self):
        hook = Hook()
        runner = Mock()
        for idx in range(100):
            runner.iter = idx
            result = hook.every_n_train_iters(runner, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_end_of_epoch(self):
        hook = Hook()
        dataloader = Mock()
        dataloader.__len__ = Mock(return_value=2)
        # last inner iter
        assert hook.end_of_epoch(dataloader, 1)
        # not the last inner iter
        assert not hook.end_of_epoch(dataloader, 0)

    def test_is_last_train_epoch(self):
        hook = Hook()
        runner = Mock()
        # last epoch
        runner.epoch = 1
        runner.max_epochs = 2
        assert hook.is_last_train_epoch(runner)
        # not the last epoch
        runner.max_epochs = 0
        assert not hook.is_last_train_epoch(runner)

    def test_is_last_train_iter(self):
        hook = Hook()
        runner = Mock()
        # last iter
        runner.iter = 1
        runner.max_iters = 2
        assert hook.is_last_train_iter(runner)
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock

from mmengine.hooks import Hook


class TestHook:
    """Smoke-tests for the base ``Hook``: every callback must run as a no-op,
    and the scheduling helpers must report the right intervals/boundaries."""

    def test_before_run(self):
        hook = Hook()
        runner = Mock()
        hook.before_run(runner)

    def test_after_run(self):
        hook = Hook()
        runner = Mock()
        hook.after_run(runner)

    def test_before_epoch(self):
        hook = Hook()
        runner = Mock()
        hook._before_epoch(runner)

    def test_after_epoch(self):
        hook = Hook()
        runner = Mock()
        hook._after_epoch(runner)

    def test_before_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook._before_iter(runner, data_batch)

    def test_after_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook._after_iter(runner, data_batch, outputs)

    def test_before_save_checkpoint(self):
        hook = Hook()
        runner = Mock()
        checkpoint = {}
        hook.before_save_checkpoint(runner, checkpoint)

    def test_after_load_checkpoint(self):
        hook = Hook()
        runner = Mock()
        checkpoint = {}
        hook.after_load_checkpoint(runner, checkpoint)

    def test_before_train_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_train_epoch(runner)

    def test_before_val_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_val_epoch(runner)

    def test_before_test_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.before_test_epoch(runner)

    def test_after_train_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_train_epoch(runner)

    def test_after_val_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_val_epoch(runner)

    def test_after_test_epoch(self):
        hook = Hook()
        runner = Mock()
        hook.after_test_epoch(runner)

    def test_before_train_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_train_iter(runner, data_batch)

    def test_before_val_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_val_iter(runner, data_batch)

    def test_before_test_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        hook.before_test_iter(runner, data_batch)

    def test_after_train_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_train_iter(runner, data_batch, outputs)

    def test_after_val_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_val_iter(runner, data_batch, outputs)

    def test_after_test_iter(self):
        hook = Hook()
        runner = Mock()
        data_batch = {}
        outputs = {}
        hook.after_test_iter(runner, data_batch, outputs)

    def test_every_n_epochs(self):
        hook = Hook()
        runner = Mock()
        # Interval helpers are 1-based: true only on every 3rd epoch.
        for idx in range(100):
            runner.epoch = idx
            result = hook.every_n_epochs(runner, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_every_n_inner_iters(self):
        hook = Hook()
        for idx in range(100):
            result = hook.every_n_inner_iters(idx, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_every_n_iters(self):
        hook = Hook()
        runner = Mock()
        for idx in range(100):
            runner.iter = idx
            result = hook.every_n_iters(runner, 3)
            if (idx + 1) % 3 == 0:
                assert result
            else:
                assert not result

    def test_end_of_epoch(self):
        hook = Hook()
        dataloader = Mock()
        dataloader.__len__ = Mock(return_value=2)
        # last inner iter
        assert hook.end_of_epoch(dataloader, 1)
        # not the last inner iter
        assert not hook.end_of_epoch(dataloader, 0)

    def test_is_last_train_epoch(self):
        hook = Hook()
        runner = Mock()
        # last epoch
        runner.epoch = 1
        runner.max_epochs = 2
        assert hook.is_last_train_epoch(runner)
        # not the last epoch
        runner.max_epochs = 0
        assert not hook.is_last_train_epoch(runner)

    def test_is_last_train_iter(self):
        hook = Hook()
        runner = Mock()
        # last iter
        runner.iter = 1
        runner.max_iters = 2
        assert hook.is_last_train_iter(runner)
import inspect

from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import quantize_and_dequantize
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case

# Registry of quantizer classes, addressable by both their class name and its
# snake_case form (e.g. "AbsMaxQuantizer" and "abs_max_quantizer").
ALL_OBJECTS = {Quantizer, AbsMaxQuantizer}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)


@keras_export("keras.quantizers.serialize")
def serialize(initializer):
    """Return the config-dict serialization of a quantizer object."""
    return serialization_lib.serialize_keras_object(initializer)


@keras_export("keras.quantizers.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras quantizer object via its config."""
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )


@keras_export("keras.quantizers.get")
def get(identifier, **kwargs):
    """Retrieve a Keras quantizer object via an identifier.

    Accepts None, a config dict, a registered name, a class, or an instance.
    Extra keyword arguments are forwarded to the constructor when a class is
    resolved.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        obj = identifier
    if callable(obj):
        if inspect.isclass(obj):
            # Expand kwargs into the constructor. Passing the dict positionally
            # (``obj(kwargs)``) would bind it to the first parameter instead of
            # the named options.
            obj = obj(**kwargs)
        return obj
    else:
        raise ValueError(
            f"Could not interpret quantizer identifier: {identifier}"
        )
import inspect

from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import quantize_and_dequantize
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case

# Registry of built-in quantizers, addressable both by class name
# ("AbsMaxQuantizer") and by snake_case alias ("abs_max_quantizer").
ALL_OBJECTS = {Quantizer, AbsMaxQuantizer}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)


@keras_export("keras.quantizers.serialize")
def serialize(initializer):
    """Return the serialized config of a Keras quantizer object."""
    return serialization_lib.serialize_keras_object(initializer)


@keras_export("keras.quantizers.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras quantizer object via its config."""
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )


@keras_export("keras.quantizers.get")
def get(identifier, **kwargs):
    """Retrieve a Keras quantizer object via an identifier.

    The identifier may be `None`, a config dict, a registered string name
    (class name or its snake_case alias), a quantizer class, or an
    instance. When a class is resolved, `kwargs` are forwarded to its
    constructor.

    Raises:
        ValueError: If the identifier cannot be interpreted.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        obj = identifier
    if callable(obj):
        if inspect.isclass(obj):
            # Bug fix: forward the collected keyword arguments to the
            # constructor. Previously the kwargs dict itself was passed as a
            # single positional argument (`obj(kwargs)`).
            obj = obj(**kwargs)
        return obj
    else:
        raise ValueError(
            f"Could not interpret quantizer identifier: {identifier}"
        )
import os
from functools import lru_cache
from subprocess import CalledProcessError, run
from typing import Optional, Union

import numpy as np
import torch
import torch.nn.functional as F

from .utils import exact_div

# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000 frames in a mel spectrogram input

N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions has stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20ms per audio token


def load_audio(file: str, sr: int = SAMPLE_RATE):
    """
    Open an audio file and read as mono waveform, resampling as necessary

    Parameters
    ----------
    file: str
        The audio file to open

    sr: int
        The sample rate to resample the audio if necessary

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """

    # This launches a subprocess to decode audio while down-mixing
    # and resampling as necessary. Requires the ffmpeg CLI in PATH.
    # fmt: off
    cmd = [
        "ffmpeg",
        "-nostdin",
        "-threads", "0",
        "-i", file,
        "-f", "s16le",
        "-ac", "1",
        "-acodec", "pcm_s16le",
        "-ar", str(sr),
        "-"
    ]
    # fmt: on
    try:
        out = run(cmd, capture_output=True, check=True).stdout
    except CalledProcessError as e:
        # Surface ffmpeg's own diagnostics; chain the original error.
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e

    # Decode the raw signed 16-bit PCM stream and normalize to [-1, 1].
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0


def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad or trim the audio array to N_SAMPLES (`length` samples along `axis`),
    as expected by the encoder. Works on both torch tensors and NumPy arrays;
    padding is always appended at the end of the chosen axis.
    """
    if torch.is_tensor(array):
        if array.shape[axis] > length:
            # Trim: keep the first `length` elements along `axis`.
            array = array.index_select(
                dim=axis, index=torch.arange(length, device=array.device)
            )

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            # F.pad expects a flat (before, after) list in reverse axis order.
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
    else:
        if array.shape[axis] > length:
            array = array.take(indices=range(length), axis=axis)

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = np.pad(array, pad_widths)

    return array


@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """
    load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    Allows decoupling librosa dependency; saved using:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    with np.load(
        os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
    ) as f:
        return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)


def log_mel_spectrogram(
    audio: Union[str, np.ndarray, torch.Tensor],
    n_mels: int = N_MELS,
    padding: int = 0,
    device: Optional[Union[str, torch.device]] = None,
):
    """
    Compute the log-Mel spectrogram of the given audio.

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz

    n_mels: int
        The number of Mel-frequency filters, only 80 is supported

    padding: int
        Number of zero samples to pad to the right

    device: Optional[Union[str, torch.device]]
        If given, the audio tensor is moved to this device before STFT

    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)

    if device is not None:
        audio = audio.to(device)
    if padding > 0:
        audio = F.pad(audio, (0, padding))
    window = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
    # Drop the last STFT frame, then take the power spectrum.
    magnitudes = stft[..., :-1].abs() ** 2

    filters = mel_filters(audio.device, n_mels)
    mel_spec = filters @ magnitudes

    # Clamp before log10 to avoid log(0); limit the dynamic range to 8
    # below the peak, then rescale.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
import os
from functools import lru_cache
from typing import Optional, Union

import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F

from .utils import exact_div

# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000 frames in a mel spectrogram input

N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions has stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20ms per audio token


def load_audio(file: str, sr: int = SAMPLE_RATE):
    """
    Open an audio file and read as mono waveform, resampling as necessary

    Parameters
    ----------
    file: str
        The audio file to open

    sr: int
        The sample rate to resample the audio if necessary

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """
    try:
        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
        # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
        out, _ = (
            ffmpeg.input(file, threads=0)
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
    except ffmpeg.Error as e:
        # Surface ffmpeg's own diagnostics; chain the original error.
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e

    # Decode the raw signed 16-bit PCM stream and normalize to [-1, 1].
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0


def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad or trim the audio array to N_SAMPLES (`length` samples along `axis`),
    as expected by the encoder. Works on both torch tensors and NumPy arrays;
    padding is always appended at the end of the chosen axis.
    """
    if torch.is_tensor(array):
        if array.shape[axis] > length:
            # Trim: keep the first `length` elements along `axis`.
            array = array.index_select(
                dim=axis, index=torch.arange(length, device=array.device)
            )

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            # F.pad expects a flat (before, after) list in reverse axis order.
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
    else:
        if array.shape[axis] > length:
            array = array.take(indices=range(length), axis=axis)

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = np.pad(array, pad_widths)

    return array


@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """
    load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    Allows decoupling librosa dependency; saved using:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    with np.load(
        os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
    ) as f:
        return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)


def log_mel_spectrogram(
    audio: Union[str, np.ndarray, torch.Tensor],
    n_mels: int = N_MELS,
    padding: int = 0,
    device: Optional[Union[str, torch.device]] = None,
):
    """
    Compute the log-Mel spectrogram of the given audio.

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz

    n_mels: int
        The number of Mel-frequency filters, only 80 is supported

    padding: int
        Number of zero samples to pad to the right

    device: Optional[Union[str, torch.device]]
        If given, the audio tensor is moved to this device before STFT

    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)

    if device is not None:
        audio = audio.to(device)
    if padding > 0:
        audio = F.pad(audio, (0, padding))
    window = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
    # Drop the last STFT frame, then take the power spectrum.
    magnitudes = stft[..., :-1].abs() ** 2

    filters = mel_filters(audio.device, n_mels)
    mel_spec = filters @ magnitudes

    # Clamp before log10 to avoid log(0); limit the dynamic range to 8
    # below the peak, then rescale.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder, threshold: float = None) -> None: """ FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models. It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point operations (FLOPs) required during inference by encouraging more zero values in the embeddings. It can use a threshold to ignore embeddings with too few non-zero elements. This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than being used as a standalone loss function. Args: model: SparseEncoder model to be regularized threshold: Optional threshold for the number of non-zero elements in the embeddings. If specified, only embeddings with more than this number of non-zero elements will be considered. This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss. References: - For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking. Relations: - Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings Example: - This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components. 
""" super().__init__() self.model = model self.threshold = threshold def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor: # Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives) embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features] return self.compute_loss_from_embeddings(embeddings) def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor: if embeddings_type == "query": embeddings_to_use = embeddings[0] # (batch_size, embedding_dim) else: embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim) if self.threshold is not None: l0_norm = (embeddings_to_use != 0).sum(dim=1) mask = (l0_norm > self.threshold).float() embeddings_to_use = embeddings_to_use * mask.unsqueeze(1) return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2) @property def citation(self) -> str: return """ @article{paria2020minimizing, title={Minimizing flops to learn efficient sparse representations}, author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s}, journal={arXiv preprint arXiv:2004.05665}, year={2020} } """
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder, threshold: float = None) -> None: """ FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models. It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point operations (FLOPs) required during inference by encouraging more zero values in the embeddings. It can use a threshold to ignore embeddings with too few non-zero elements. This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than being used as a standalone loss function. Args: model: SparseEncoder model to be regularized threshold: Optional threshold for the number of non-zero elements in the embeddings. If specified, only embeddings with more than this number of non-zero elements will be considered. This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss. References: - For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking. Relations: - Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings Example: - This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components. 
""" super().__init__() self.model = model self.threshold = threshold def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor: # Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives) embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features] return self.compute_loss_from_embeddings(embeddings) def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor: if embeddings_type == "query": embeddings_to_use = embeddings[0] # (batch_size, embedding_dim) else: embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim) if self.threshold is not None: l0_norm = (embeddings_to_use != 0).sum(dim=1) mask = (l0_norm > self.threshold).float() embeddings_to_use = embeddings_to_use * mask.unsqueeze(1) if embeddings_type == "query": return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2) else: return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2) @property def citation(self) -> str: return """ @article{paria2020minimizing, title={Minimizing flops to learn efficient sparse representations}, author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s}, journal={arXiv preprint arXiv:2004.05665}, year={2020} } """
from __future__ import annotations

from typing import Any, Optional, Union, cast

from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict


class ToolsOutputParser(BaseGenerationOutputParser):
    """Output parser for tool calls."""

    first_tool_only: bool = False
    """Whether to return only the first tool call."""

    args_only: bool = False
    """Whether to return only the arguments of the tool calls."""

    pydantic_schemas: Optional[list[type[BaseModel]]] = None
    """Pydantic schemas to parse tool calls into."""

    model_config = ConfigDict(
        extra="forbid",
    )

    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
        """Parse a list of candidate model Generations into a specific format.

        Args:
            result: A list of Generations to be parsed. The Generations are
                assumed to be different candidate outputs for a single model
                input.
            partial: (Not used) Whether the result is a partial result. If True,
                the parser may return a partial result, which may not be
                complete or valid.

        Returns:
            Structured output.
        """
        # Non-chat generations carry no tool calls; honor first_tool_only.
        if not result or not isinstance(result[0], ChatGeneration):
            return None if self.first_tool_only else []
        message = cast(AIMessage, result[0].message)
        tool_calls: list = [
            dict(tc) for tc in _extract_tool_calls_from_message(message)
        ]
        if isinstance(message.content, list):
            # Map tool call id to its index among the content blocks.
            id_to_index = {
                block["id"]: i
                for i, block in enumerate(message.content)
                if isinstance(block, dict) and block["type"] == "tool_use"
            }
            tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
        if self.pydantic_schemas:
            tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
        elif self.args_only:
            tool_calls = [tc["args"] for tc in tool_calls]
        # (dead `else: pass` branch removed)

        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
        return list(tool_calls)

    def _pydantic_parse(self, tool_call_dict: dict) -> BaseModel:
        """Instantiate the pydantic schema matching the tool call's name.

        The parameter was renamed from ``tool_call`` to avoid shadowing the
        ``tool_call`` factory imported at module level.
        """
        cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
            tool_call_dict["name"]
        ]
        return cls_(**tool_call_dict["args"])


def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    if message.tool_calls:
        return message.tool_calls
    return extract_tool_calls(message.content)


def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    if isinstance(content, list):
        tool_calls = []
        for block in content:
            if isinstance(block, str):
                continue
            if block["type"] != "tool_use":
                continue
            tool_calls.append(
                tool_call(name=block["name"], args=block["input"], id=block["id"]),
            )
        return tool_calls
    return []
from typing import Any, Optional, Union, cast

from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict


class ToolsOutputParser(BaseGenerationOutputParser):
    """Output parser for tool calls."""

    first_tool_only: bool = False
    """Whether to return only the first tool call."""

    args_only: bool = False
    """Whether to return only the arguments of the tool calls."""

    pydantic_schemas: Optional[list[type[BaseModel]]] = None
    """Pydantic schemas to parse tool calls into."""

    model_config = ConfigDict(
        extra="forbid",
    )

    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
        """Parse a list of candidate model Generations into a specific format.

        Args:
            result: A list of Generations to be parsed. The Generations are
                assumed to be different candidate outputs for a single model
                input.
            partial: (Not used) Whether the result is a partial result.

        Returns:
            Structured output.
        """
        # Non-chat generations carry no tool calls; honor first_tool_only.
        if not result or not isinstance(result[0], ChatGeneration):
            return None if self.first_tool_only else []
        message = cast(AIMessage, result[0].message)
        tool_calls: list = [
            dict(tc) for tc in _extract_tool_calls_from_message(message)
        ]
        if isinstance(message.content, list):
            # Map tool call id to its index among the content blocks.
            id_to_index = {
                block["id"]: i
                for i, block in enumerate(message.content)
                if isinstance(block, dict) and block["type"] == "tool_use"
            }
            tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
        if self.pydantic_schemas:
            tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
        elif self.args_only:
            tool_calls = [tc["args"] for tc in tool_calls]
        # (dead `else: pass` branch removed)

        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
        # Return a fresh list; the previous identity comprehension shadowed
        # the module-level `tool_call` factory.
        return list(tool_calls)

    def _pydantic_parse(self, tool_call_dict: dict) -> BaseModel:
        """Instantiate the pydantic schema matching the tool call's name.

        The parameter was renamed from ``tool_call`` to avoid shadowing the
        ``tool_call`` factory imported at module level.
        """
        cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
            tool_call_dict["name"]
        ]
        return cls_(**tool_call_dict["args"])


def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    if message.tool_calls:
        return message.tool_calls
    return extract_tool_calls(message.content)


def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    if isinstance(content, list):
        tool_calls = []
        for block in content:
            if isinstance(block, str):
                continue
            if block["type"] != "tool_use":
                continue
            tool_calls.append(
                tool_call(name=block["name"], args=block["input"], id=block["id"])
            )
        return tool_calls
    return []
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to string translations.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': 'le chat',
    ...         'de': 'die katze'
    ... }
    ```
    """

    languages: list[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string field per language, sorted for determinism.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to one or more string translations.
            The languages present may vary from example to example.

    Returns:
        - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
            Language codes sorted in ascending order or plain text translations, sorted to align with language codes.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': ['le chat', 'la chatte,']
    ...         'de': 'die katze'
    ... }
    >>> # Tensor returned :
    >>> {
    ...         'language': ['en', 'de', 'fr', 'fr'],
    ...         'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
    ... }
    ```
    """

    languages: Optional[list] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode one example dict into parallel language/translation lists.

        Raises:
            ValueError: If `languages` is set and the example contains a
                language outside that set.
        """
        # Bug fix: guard against `languages=None` — previously `set(None)`
        # raised a TypeError even for already-encoded inputs.
        lang_set = set(self.languages) if self.languages else set()
        if set(translation_dict) == {"language", "translation"}:
            return translation_dict
        elif self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to string translations.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': 'le chat',
    ...         'de': 'die katze'
    ... }
    ```
    """

    languages: list[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string field per language, sorted for determinism.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to one or more string translations.
            The languages present may vary from example to example.

    Returns:
        - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
            Language codes sorted in ascending order or plain text translations, sorted to align with language codes.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': ['le chat', 'la chatte,']
    ...         'de': 'die katze'
    ... }
    >>> # Tensor returned :
    >>> {
    ...         'language': ['en', 'de', 'fr', 'fr'],
    ...         'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
    ... }
    ```
    """

    languages: Optional[list] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode one example dict into parallel language/translation lists.

        Raises:
            ValueError: If `languages` is set and the example contains a
                language outside that set.
        """
        # Bug fix: guard against `languages=None` — previously `set(None)`
        # raised a TypeError even for already-encoded inputs.
        lang_set = set(self.languages) if self.languages else set()
        if set(translation_dict) == {"language", "translation"}:
            return translation_dict
        elif self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
import zlib
from typing import Iterator, TextIO


def exact_div(x, y):
    """Divide `x` by `y`, asserting that the division leaves no remainder."""
    quotient, remainder = divmod(x, y)
    assert remainder == 0
    return quotient


def str2bool(string):
    """Parse the literal strings "True"/"False" into booleans."""
    str2val = {"True": True, "False": False}
    if string not in str2val:
        raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
    return str2val[string]


def optional_int(string):
    """Parse an int, treating the literal "None" as None."""
    if string == "None":
        return None
    return int(string)


def optional_float(string):
    """Parse a float, treating the literal "None" as None."""
    if string == "None":
        return None
    return float(string)


def compression_ratio(text) -> float:
    """Ratio of raw text length to its zlib-compressed size."""
    encoded = text.encode("utf-8")
    return len(text) / len(zlib.compress(encoded))


def format_timestamp(seconds: float, always_include_hours: bool = False):
    """Render a non-negative timestamp as [H:]MM:SS.mmm."""
    assert seconds >= 0, "non-negative timestamp expected"
    remaining_ms = round(seconds * 1000.0)

    hours, remaining_ms = divmod(remaining_ms, 3_600_000)
    minutes, remaining_ms = divmod(remaining_ms, 60_000)
    whole_seconds, milliseconds = divmod(remaining_ms, 1_000)

    prefix = f"{hours}:" if always_include_hours or hours > 0 else ""
    return f"{prefix}{minutes:02d}:{whole_seconds:02d}.{milliseconds:03d}"


def write_txt(transcript: Iterator[dict], file: TextIO):
    """Write each segment's stripped text to `file`, one per line."""
    for segment in transcript:
        stripped = segment["text"].strip()
        print(stripped, file=file, flush=True)


def write_vtt(transcript: Iterator[dict], file: TextIO):
    """Write a transcript to `file` in WebVTT format."""
    print("WEBVTT\n", file=file)
    for segment in transcript:
        start = format_timestamp(segment["start"])
        end = format_timestamp(segment["end"])
        text = segment["text"].replace("-->", "->")
        print(f"{start} --> {end}\n{text}\n", file=file, flush=True)


def write_srt(transcript: Iterator[dict], file: TextIO):
    """
    Write a transcript to a file in SRT format.

    Example usage:
        from pathlib import Path
        from whisper.utils import write_srt

        result = transcribe(model, audio_path, temperature=temperature, **args)

        # save SRT
        audio_basename = Path(audio_path).stem
        with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
            write_srt(result["segments"], file=srt)
    """
    for index, segment in enumerate(transcript, start=1):
        start = format_timestamp(segment["start"], always_include_hours=True)
        end = format_timestamp(segment["end"], always_include_hours=True)
        text = segment["text"].strip().replace("-->", "->")
        # write srt lines
        print(f"{index}\n{start} --> {end}\n{text}\n", file=file, flush=True)
import zlib
from typing import Iterator, TextIO


def exact_div(x, y):
    """Return x // y, asserting that y divides x exactly."""
    assert x % y == 0
    return x // y


def str2bool(string):
    """Parse the exact strings "True"/"False" into booleans; raise ValueError otherwise."""
    str2val = {"True": True, "False": False}
    if string in str2val:
        return str2val[string]
    else:
        raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")


def optional_int(string):
    """Parse an int, treating the literal string "None" as None."""
    return None if string == "None" else int(string)


def optional_float(string):
    """Parse a float, treating the literal string "None" as None."""
    return None if string == "None" else float(string)


def compression_ratio(text) -> float:
    """Ratio of character count to zlib-compressed size; higher means more repetitive text."""
    return len(text) / len(zlib.compress(text.encode("utf-8")))


def format_timestamp(seconds: float, always_include_hours: bool = False):
    """Format a non-negative number of seconds as [H:]MM:SS.mmm."""
    assert seconds >= 0, "non-negative timestamp expected"
    milliseconds = round(seconds * 1000.0)

    hours = milliseconds // 3_600_000
    milliseconds -= hours * 3_600_000

    minutes = milliseconds // 60_000
    milliseconds -= minutes * 60_000

    seconds = milliseconds // 1_000
    milliseconds -= seconds * 1_000

    # The hour field is omitted unless non-zero or explicitly requested.
    hours_marker = f"{hours}:" if always_include_hours or hours > 0 else ""
    return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}"


def write_vtt(transcript: Iterator[dict], file: TextIO):
    """Write a transcript (iterable of segment dicts) to *file* in WebVTT format."""
    print("WEBVTT\n", file=file)
    for segment in transcript:
        # '-->' inside the text would break the VTT cue syntax, so soften it.
        print(
            f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
            f"{segment['text'].replace('-->', '->')}\n",
            file=file,
            flush=True,
        )


def write_srt(transcript: Iterator[dict], file: TextIO):
    """
    Write a transcript to a file in SRT format.

    Example usage:
        from pathlib import Path
        from whisper.utils import write_srt

        result = transcribe(model, audio_path, temperature=temperature, **args)

        # save SRT
        audio_basename = Path(audio_path).stem
        with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
            write_srt(result["segments"], file=srt)
    """
    for i, segment in enumerate(transcript, start=1):
        # write srt lines (cue index, hour-inclusive timestamps, then the text)
        print(
            f"{i}\n"
            f"{format_timestamp(segment['start'], always_include_hours=True)} --> "
            f"{format_timestamp(segment['end'], always_include_hours=True)}\n"
            f"{segment['text'].strip().replace('-->', '->')}\n",
            file=file,
            flush=True,
        )
import inspect
import threading
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Optional, TypeVar

from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr, ConfigDict
from llama_index.core.instrumentation.span.base import BaseSpan

T = TypeVar("T", bound=BaseSpan)


class BaseSpanHandler(BaseModel, Generic[T]):
    """Base class for instrumentation span handlers.

    Tracks spans in three states — currently open, completed, and dropped —
    and serializes mutation of `open_spans` behind a lazily created lock.
    Subclasses implement `new_span`, `prepare_to_exit_span`, and
    `prepare_to_drop_span` for their concrete span type T.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    open_spans: Dict[str, T] = Field(
        default_factory=dict, description="Dictionary of open spans."
    )
    completed_spans: List[T] = Field(
        default_factory=list, description="List of completed spans."
    )
    dropped_spans: List[T] = Field(
        # fix: description previously read "List of completed spans." (copy-paste)
        default_factory=list, description="List of dropped spans."
    )
    current_span_ids: Dict[Any, Optional[str]] = Field(
        # fix: default_factory instead of `default={}` so instances never
        # risk sharing one mutable dict
        default_factory=dict, description="Id of current spans in a given thread."
    )
    # Lazily created in the `lock` property; None until first use.
    _lock: Optional[threading.Lock] = PrivateAttr()

    def __init__(
        self,
        open_spans: Optional[Dict[str, T]] = None,
        completed_spans: Optional[List[T]] = None,
        dropped_spans: Optional[List[T]] = None,
        current_span_ids: Optional[Dict[Any, str]] = None,
    ):
        # fix: the previous signature used mutable default arguments ({} / []),
        # which Python shares across calls; None sentinels avoid that while
        # remaining backward-compatible for callers passing values explicitly.
        super().__init__(
            open_spans=open_spans if open_spans is not None else {},
            completed_spans=completed_spans if completed_spans is not None else [],
            dropped_spans=dropped_spans if dropped_spans is not None else [],
            current_span_ids=current_span_ids if current_span_ids is not None else {},
        )
        self._lock = None

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        # fix: took `cls` but was not declared a classmethod; still callable
        # on instances, so this is backward-compatible.
        return "BaseSpanHandler"

    @property
    def lock(self) -> threading.Lock:
        # Created on first access rather than in __init__.
        if self._lock is None:
            self._lock = threading.Lock()
        return self._lock

    def span_enter(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for entering a span."""
        if id_ in self.open_spans:
            pass  # should probably raise an error here
        else:
            span = self.new_span(
                id_=id_,
                bound_args=bound_args,
                instance=instance,
                parent_span_id=parent_id,
                tags=tags,
            )
            if span:
                with self.lock:
                    self.open_spans[id_] = span

    def span_exit(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for exiting a span."""
        span = self.prepare_to_exit_span(
            id_=id_, bound_args=bound_args, instance=instance, result=result
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    def span_drop(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for dropping a span i.e. early exit."""
        span = self.prepare_to_drop_span(
            id_=id_, bound_args=bound_args, instance=instance, err=err
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    @abstractmethod
    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Create a span.

        Subclasses of BaseSpanHandler should create the respective span type T
        and return it. Only NullSpanHandler should return a None here.
        """
        ...

    @abstractmethod
    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Logic for preparing to exit a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be exited. If None is returned, then the span won't actually
        be exited.
        """
        ...

    @abstractmethod
    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Logic for preparing to drop a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be dropped. If None is returned, then the span won't actually
        be dropped.
        """
        ...
import inspect
import threading
from abc import abstractmethod
from typing import Any, Dict, List, Generic, Optional, TypeVar

from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr, ConfigDict
from llama_index.core.instrumentation.span.base import BaseSpan

T = TypeVar("T", bound=BaseSpan)


class BaseSpanHandler(BaseModel, Generic[T]):
    """Base class for instrumentation span handlers tracking open/completed/dropped spans."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    open_spans: Dict[str, T] = Field(
        default_factory=dict, description="Dictionary of open spans."
    )
    completed_spans: List[T] = Field(
        default_factory=list, description="List of completed spans."
    )
    # NOTE(review): description says "completed" but this field holds dropped
    # spans — looks like a copy-paste slip; confirm before changing the string.
    dropped_spans: List[T] = Field(
        default_factory=list, description="List of completed spans."
    )
    # NOTE(review): `default={}` is a shared mutable default; pydantic copies
    # it per instance, but default_factory=dict would be the safer idiom.
    current_span_ids: Dict[Any, Optional[str]] = Field(
        default={}, description="Id of current spans in a given thread."
    )
    # Lazily created in the `lock` property; None until first use.
    _lock: Optional[threading.Lock] = PrivateAttr()

    def __init__(
        self,
        open_spans: Dict[str, T] = {},
        completed_spans: List[T] = [],
        dropped_spans: List[T] = [],
        current_span_ids: Dict[Any, str] = {},
    ):
        # NOTE(review): mutable default arguments ({} / []) are shared across
        # calls; harmless only as long as callers never mutate the defaults.
        super().__init__(
            open_spans=open_spans,
            completed_spans=completed_spans,
            dropped_spans=dropped_spans,
            current_span_ids=current_span_ids,
        )
        self._lock = None

    def class_name(cls) -> str:
        """Class name."""
        # NOTE(review): takes `cls` but is not decorated @classmethod, so at
        # runtime `cls` is actually the instance — confirm intent.
        return "BaseSpanHandler"

    @property
    def lock(self) -> threading.Lock:
        # Lock is created on first access rather than in __init__.
        if self._lock is None:
            self._lock = threading.Lock()
        return self._lock

    def span_enter(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for entering a span."""
        if id_ in self.open_spans:
            pass  # should probably raise an error here
        else:
            span = self.new_span(
                id_=id_,
                bound_args=bound_args,
                instance=instance,
                parent_span_id=parent_id,
                tags=tags,
            )
            if span:
                with self.lock:
                    self.open_spans[id_] = span

    def span_exit(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for exiting a span."""
        # Bookkeeping beyond removal (e.g. appending to completed_spans) is
        # delegated to the subclass's prepare_to_exit_span.
        span = self.prepare_to_exit_span(
            id_=id_, bound_args=bound_args, instance=instance, result=result
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    def span_drop(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for dropping a span i.e. early exit."""
        span = self.prepare_to_drop_span(
            id_=id_, bound_args=bound_args, instance=instance, err=err
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    @abstractmethod
    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """Create a span.

        Subclasses of BaseSpanHandler should create the respective span type T
        and return it. Only NullSpanHandler should return a None here.
        """
        ...

    @abstractmethod
    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """Logic for preparing to exit a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be exited. If None is returned, then the span won't actually
        be exited.
        """
        ...

    @abstractmethod
    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """Logic for preparing to drop a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be dropped. If None is returned, then the span won't actually
        be dropped.
        """
        ...
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer


@keras_export("keras.layers.Masking")
class Masking(Layer):
    """Masks a sequence by using a mask value to skip timesteps.

    For each timestep in the input tensor (dimension #1 in the tensor),
    if all values in the input tensor at that timestep are equal to
    `mask_value`, then the timestep will be masked (skipped) in all
    downstream layers (as long as they support masking).

    If any downstream layer does not support masking yet receives such
    an input mask, an exception will be raised.

    Example:

    Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
    to be fed to an LSTM layer. You want to mask timestep #3 and #5 because
    you lack data for these timesteps. You can:

    - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
    - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

    ```python
    samples, timesteps, features = 32, 10, 8
    inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
    inputs[:, 3, :] = 0.
    inputs[:, 5, :] = 0.

    model = keras.models.Sequential()
    model.add(keras.layers.Masking(mask_value=0.))
    model.add(keras.layers.LSTM(32))
    output = model(inputs)
    # The time step 3 and 5 will be skipped from LSTM calculation.
    ```

    Note: in the Keras masking convention, a masked timestep is denoted by
    a mask value of `False`, while a non-masked (i.e. usable) timestep
    is denoted by a mask value of `True`.
    """

    def __init__(self, mask_value=0.0, **kwargs):
        super().__init__(**kwargs)
        self.mask_value = mask_value
        self.supports_masking = True
        # The layer has no weights, so it is built immediately.
        self.built = True

    def compute_mask(self, inputs, mask=None):
        # A timestep is kept (True) if any feature differs from mask_value.
        return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)

    def call(self, inputs):
        boolean_mask = ops.any(
            ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
        )
        # Set masked outputs to 0
        outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
        # Compute the mask and outputs simultaneously.
        try:
            outputs._keras_mask = ops.squeeze(boolean_mask, axis=-1)
        except AttributeError:
            # tensor is a C type.
            pass
        return outputs

    def compute_output_shape(self, input_shape):
        # Masking does not change the shape of its input.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {"mask_value": self.mask_value}
        return {**base_config, **config}
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer


@keras_export("keras.layers.Masking")
class Masking(Layer):
    """Masks a sequence by using a mask value to skip timesteps.

    For each timestep in the input tensor (dimension #1 in the tensor),
    if all values in the input tensor at that timestep are equal to
    `mask_value`, then the timestep will be masked (skipped) in all
    downstream layers (as long as they support masking).

    If any downstream layer does not support masking yet receives such
    an input mask, an exception will be raised.

    Example:

    Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
    to be fed to an LSTM layer. You want to mask timestep #3 and #5 because
    you lack data for these timesteps. You can:

    - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
    - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

    ```python
    samples, timesteps, features = 32, 10, 8
    inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
    inputs[:, 3, :] = 0.
    inputs[:, 5, :] = 0.

    model = keras.models.Sequential()
    model.add(keras.layers.Masking(mask_value=0.))
    model.add(keras.layers.LSTM(32))
    output = model(inputs)
    # The time step 3 and 5 will be skipped from LSTM calculation.
    ```

    Note: in the Keras masking convention, a masked timestep is denoted by
    a mask value of `False`, while a non-masked (i.e. usable) timestep
    is denoted by a mask value of `True`.
    """

    def __init__(self, mask_value=0.0, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.mask_value = mask_value

    def compute_mask(self, inputs, mask=None):
        # A timestep is kept (True) if any feature differs from mask_value.
        return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)

    def call(self, inputs):
        boolean_mask = ops.any(
            ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
        )
        # Set masked outputs to 0
        outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
        # Compute the mask and outputs simultaneously.
        try:
            outputs._keras_mask = ops.squeeze(boolean_mask, axis=-1)
        except AttributeError:
            # tensor is a C type.
            pass
        return outputs

    def compute_output_shape(self, input_shape):
        # Masking does not change the shape of its input.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {"mask_value": self.mask_value}
        return {**base_config, **config}
from llama_index.indices.managed.llama_cloud.base import LlamaCloudIndex from llama_index.indices.managed.llama_cloud.retriever import LlamaCloudRetriever from llama_index.indices.managed.llama_cloud.composite_retriever import ( LlamaCloudCompositeRetriever, ) __all__ = [ "LlamaCloudIndex", "LlamaCloudRetriever", "LlamaCloudCompositeRetriever", ]
from llama_index.indices.managed.llama_cloud.base import LlamaCloudIndex from llama_index.indices.managed.llama_cloud.retriever import LlamaCloudRetriever __all__ = [ "LlamaCloudIndex", "LlamaCloudRetriever", ]
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "LargeList", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", "Video", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages from .video import Video
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "LargeList", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .replace_cfg_vals import replace_cfg_vals from .setup_env import setup_multi_processes from .split_batch import split_batch from .util_distribution import build_ddp, build_dp, get_device __all__ = [ 'get_root_logger', 'collect_env', 'find_latest_checkpoint', 'update_data_root', 'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp', 'get_device', 'replace_cfg_vals' ]
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .setup_env import setup_multi_processes from .split_batch import split_batch from .util_distribution import build_ddp, build_dp, get_device __all__ = [ 'get_root_logger', 'collect_env', 'find_latest_checkpoint', 'update_data_root', 'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp', 'get_device' ]
import csv
import logging
import os
from typing import List

import numpy as np

from sentence_transformers import InputExample

logger = logging.getLogger(__name__)


class CESoftmaxAccuracyEvaluator:
    """
    This evaluator can be used with the CrossEncoder class.

    It is designed for CrossEncoders with 2 or more outputs. It measure the
    accuracy of the predict class vs. the gold labels.
    """

    def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True):
        """
        :param sentence_pairs: list of [sentence_a, sentence_b] pairs to classify
        :param labels: gold class index for each pair
        :param name: optional name used in log output and the CSV filename
        :param write_csv: whether to append results to a CSV file in output_path
        """
        self.sentence_pairs = sentence_pairs
        self.labels = labels
        self.name = name

        self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
        self.csv_headers = ["epoch", "steps", "Accuracy"]
        self.write_csv = write_csv

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Build an evaluator from InputExample objects (texts -> pairs, label -> gold)."""
        sentence_pairs = []
        labels = []

        for example in examples:
            sentence_pairs.append(example.texts)
            labels.append(example.label)
        return cls(sentence_pairs, labels, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Evaluate `model` on the stored pairs and return the accuracy in [0, 1].

        If `output_path` is given and `write_csv` is enabled, appends a
        (epoch, steps, accuracy) row to the results CSV.
        """
        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"

        logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
        pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
        pred_labels = np.argmax(pred_scores, axis=1)

        assert len(pred_labels) == len(self.labels)

        acc = np.sum(pred_labels == self.labels) / len(self.labels)

        logger.info("Accuracy: {:.2f}".format(acc * 100))

        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            # fix: csv.writer requires the file to be opened with newline=""
            # (otherwise extra blank lines are written on Windows)
            with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)

                writer.writerow([epoch, steps, acc])

        return acc
import logging import os import csv from typing import List from ... import InputExample import numpy as np logger = logging.getLogger(__name__) class CESoftmaxAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 2 or more outputs. It measure the accuracy of the predict class vs. the gold labels. """ def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True): self.sentence_pairs = sentence_pairs self.labels = labels self.name = name self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv" self.csv_headers = ["epoch", "steps", "Accuracy"] self.write_csv = write_csv @classmethod def from_input_examples(cls, examples: List[InputExample], **kwargs): sentence_pairs = [] labels = [] for example in examples: sentence_pairs.append(example.texts) labels.append(example.label) return cls(sentence_pairs, labels, **kwargs) def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float: if epoch != -1: if steps == -1: out_txt = " after epoch {}:".format(epoch) else: out_txt = " in epoch {} after {} steps:".format(epoch, steps) else: out_txt = ":" logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt) pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False) pred_labels = np.argmax(pred_scores, axis=1) assert len(pred_labels) == len(self.labels) acc = np.sum(pred_labels == self.labels) / len(self.labels) logger.info("Accuracy: {:.2f}".format(acc * 100)) if output_path is not None and self.write_csv: csv_path = os.path.join(output_path, self.csv_file) output_file_exists = os.path.isfile(csv_path) with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f: writer = csv.writer(f) if not output_file_exists: writer.writerow(self.csv_headers) writer.writerow([epoch, steps, acc]) return acc
"""Tests for `VideoUrl`: loading video/audio/key-frame data, validation,
JSON schema/dump, protobuf round-trip, and raw byte loading."""
from typing import Optional

import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of

from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
    AudioNdArray,
    NdArray,
    VideoNdArray,
    VideoTorchTensor,
    VideoUrl,
)
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR

# TensorFlow is optional; the tf test below is gated by @pytest.mark.tensorflow.
tf_available = is_tf_available()
if tf_available:
    import tensorflow as tf

    from docarray.typing.tensor.video import VideoTensorFlowTensor


LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'  # noqa: E501


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
    # load() returns (video, audio, key_frame_indices) as typed ndarrays.
    url = parse_obj_as(VideoUrl, file_url)
    video, audio, indices = url.load()

    assert isinstance(audio, np.ndarray)
    assert isinstance(audio, AudioNdArray)

    assert isinstance(video, np.ndarray)
    assert isinstance(video, VideoNdArray)

    assert isinstance(indices, np.ndarray)
    assert isinstance(indices, NdArray)


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
    'field, attr_cls',
    [
        ('video', VideoNdArray),
        ('audio', AudioNdArray),
        ('key_frame_indices', NdArray),
    ],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
    # Each named-tuple field of load() carries its specific ndarray subtype.
    url = parse_obj_as(VideoUrl, file_url)
    result = getattr(url.load(), field)

    assert isinstance(result, np.ndarray)
    assert isinstance(result, attr_cls)


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
    # The loaded video can be assigned to a VideoTorchTensor document field.
    class MyVideoDoc(BaseDoc):
        video_url: VideoUrl
        tensor: Optional[VideoTorchTensor]

    doc = MyVideoDoc(video_url=file_url)
    doc.tensor = doc.video_url.load().video

    assert isinstance(doc.tensor, torch.Tensor)
    assert isinstance(doc.tensor, VideoTorchTensor)


@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
    # Same as above, but for the TensorFlow tensor wrapper type.
    class MyVideoDoc(BaseDoc):
        video_url: VideoUrl
        tensor: Optional[VideoTensorFlowTensor]

    doc = MyVideoDoc(video_url=file_url)
    doc.tensor = doc.video_url.load().video

    assert isinstance(doc.tensor, VideoTensorFlowTensor)
    assert isinstance(doc.tensor.tensor, tf.Tensor)


def test_json_schema():
    # VideoUrl must be JSON-schema serializable.
    schema_json_of(VideoUrl)


def test_dump_json():
    url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
    orjson_dumps(url)


@pytest.mark.parametrize(
    'path_to_file',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
    url = parse_obj_as(VideoUrl, path_to_file)
    assert isinstance(url, VideoUrl)
    assert isinstance(url, str)


@pytest.mark.parametrize(
    'path_to_file',
    [
        'illegal',
        'https://www.google.com',
        'my/local/text/file.txt',
        'my/local/text/file.png',
        'my/local/file.mp3',
    ],
)
def test_illegal_validation(path_to_file):
    # Non-video paths/URLs must be rejected with a ValueError mentioning VideoUrl.
    with pytest.raises(ValueError, match='VideoUrl'):
        parse_obj_as(VideoUrl, path_to_file)


@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
    uri = parse_obj_as(VideoUrl, file_url)

    proto = uri._to_node_protobuf()

    assert 'video_url' in str(proto)


def test_load_bytes():
    file_url = LOCAL_VIDEO_FILE
    uri = parse_obj_as(VideoUrl, file_url)

    video_bytes = uri.load_bytes()
    assert isinstance(video_bytes, bytes)
    assert len(video_bytes) > 0
"""Tests for `VideoUrl` (older `BaseDocument` API): loading, validation,
JSON schema/dump, protobuf round-trip, and raw byte loading."""
from typing import Optional

import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of

from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
    AudioNdArray,
    NdArray,
    VideoNdArray,
    VideoTorchTensor,
    VideoUrl,
)
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR

# TensorFlow is optional; the tf test below is gated by @pytest.mark.tensorflow.
tf_available = is_tf_available()
if tf_available:
    import tensorflow as tf

    from docarray.typing.tensor.video import VideoTensorFlowTensor


LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'  # noqa: E501


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
    # load() returns (video, audio, key_frame_indices) as typed ndarrays.
    url = parse_obj_as(VideoUrl, file_url)
    video, audio, indices = url.load()

    assert isinstance(audio, np.ndarray)
    assert isinstance(audio, AudioNdArray)

    assert isinstance(video, np.ndarray)
    assert isinstance(video, VideoNdArray)

    assert isinstance(indices, np.ndarray)
    assert isinstance(indices, NdArray)


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
    'field, attr_cls',
    [
        ('video', VideoNdArray),
        ('audio', AudioNdArray),
        ('key_frame_indices', NdArray),
    ],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
    # Each named-tuple field of load() carries its specific ndarray subtype.
    url = parse_obj_as(VideoUrl, file_url)
    result = getattr(url.load(), field)

    assert isinstance(result, np.ndarray)
    assert isinstance(result, attr_cls)


@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
    # The loaded video can be assigned to a VideoTorchTensor document field.
    class MyVideoDoc(BaseDocument):
        video_url: VideoUrl
        tensor: Optional[VideoTorchTensor]

    doc = MyVideoDoc(video_url=file_url)
    doc.tensor = doc.video_url.load().video

    assert isinstance(doc.tensor, torch.Tensor)
    assert isinstance(doc.tensor, VideoTorchTensor)


@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
    # Same as above, but for the TensorFlow tensor wrapper type.
    class MyVideoDoc(BaseDocument):
        video_url: VideoUrl
        tensor: Optional[VideoTensorFlowTensor]

    doc = MyVideoDoc(video_url=file_url)
    doc.tensor = doc.video_url.load().video

    assert isinstance(doc.tensor, VideoTensorFlowTensor)
    assert isinstance(doc.tensor.tensor, tf.Tensor)


def test_json_schema():
    # VideoUrl must be JSON-schema serializable.
    schema_json_of(VideoUrl)


def test_dump_json():
    url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
    orjson_dumps(url)


@pytest.mark.parametrize(
    'path_to_file',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
    url = parse_obj_as(VideoUrl, path_to_file)
    assert isinstance(url, VideoUrl)
    assert isinstance(url, str)


@pytest.mark.parametrize(
    'path_to_file',
    [
        'illegal',
        'https://www.google.com',
        'my/local/text/file.txt',
        'my/local/text/file.png',
        'my/local/file.mp3',
    ],
)
def test_illegal_validation(path_to_file):
    # Non-video paths/URLs must be rejected with a ValueError mentioning VideoUrl.
    with pytest.raises(ValueError, match='VideoUrl'):
        parse_obj_as(VideoUrl, path_to_file)


@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
    'file_url',
    [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
    uri = parse_obj_as(VideoUrl, file_url)

    proto = uri._to_node_protobuf()

    assert 'video_url' in str(proto)


def test_load_bytes():
    file_url = LOCAL_VIDEO_FILE
    uri = parse_obj_as(VideoUrl, file_url)

    video_bytes = uri.load_bytes()
    assert isinstance(video_bytes, bytes)
    assert len(video_bytes) > 0
from typing import Any, Dict, List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K from llama_index.core.schema import NodeWithScore, QueryBundle from llama_index.core.settings import Settings from llama_index.core.vector_stores.types import MetadataFilters from .base import ColbertIndex class ColbertRetriever(BaseRetriever): """ Vector index retriever. Args: index (ColbertIndex): Colbert index. similarity_top_k (int): number of top k results to return. filters (Optional[MetadataFilters]): metadata filters, defaults to None doc_ids (Optional[List[str]]): list of documents to constrain search. colbert_kwargs (dict): Additional colbert specific kwargs to pass through to the colbert index at query time. """ def __init__( self, index: ColbertIndex, similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K, filters: Optional[MetadataFilters] = None, node_ids: Optional[List[str]] = None, doc_ids: Optional[List[str]] = None, callback_manager: Optional[CallbackManager] = None, object_map: Optional[dict] = None, verbose: bool = False, **kwargs: Any, ) -> None: """Initialize params.""" self._index = index self._docstore = self._index.docstore self._similarity_top_k = similarity_top_k self._node_ids = node_ids self._doc_ids = doc_ids self._filters = filters self._kwargs: Dict[str, Any] = kwargs.get("colbert_kwargs", {}) super().__init__( callback_manager=callback_manager or Settings.callback_manager, object_map=object_map, verbose=verbose, ) def _retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: return self._index.query( query_str=query_bundle.query_str, top_k=self._similarity_top_k, **self._kwargs, )
from typing import Any, Dict, List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K from llama_index.core.schema import NodeWithScore, QueryBundle from llama_index.core.settings import Settings from llama_index.core.vector_stores.types import MetadataFilters from .base import ColbertIndex class ColbertRetriever(BaseRetriever): """Vector index retriever. Args: index (ColbertIndex): Colbert index. similarity_top_k (int): number of top k results to return. filters (Optional[MetadataFilters]): metadata filters, defaults to None doc_ids (Optional[List[str]]): list of documents to constrain search. colbert_kwargs (dict): Additional colbert specific kwargs to pass through to the colbert index at query time. """ def __init__( self, index: ColbertIndex, similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K, filters: Optional[MetadataFilters] = None, node_ids: Optional[List[str]] = None, doc_ids: Optional[List[str]] = None, callback_manager: Optional[CallbackManager] = None, object_map: Optional[dict] = None, verbose: bool = False, **kwargs: Any, ) -> None: """Initialize params.""" self._index = index self._docstore = self._index.docstore self._similarity_top_k = similarity_top_k self._node_ids = node_ids self._doc_ids = doc_ids self._filters = filters self._kwargs: Dict[str, Any] = kwargs.get("colbert_kwargs", {}) super().__init__( callback_manager=callback_manager or Settings.callback_manager, object_map=object_map, verbose=verbose, ) def _retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: return self._index.query( query_str=query_bundle.query_str, top_k=self._similarity_top_k, **self._kwargs, )
from typing import Union import numpy as np import PIL.Image import torch from torchvision import tv_tensors from torchvision.transforms import functional as _F @torch.jit.unused def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image: """See :class:`~torchvision.transforms.v2.ToImage` for details.""" if isinstance(inpt, np.ndarray): output = torch.from_numpy(np.atleast_3d(inpt)).permute((2, 0, 1)).contiguous() elif isinstance(inpt, PIL.Image.Image): output = pil_to_tensor(inpt) elif isinstance(inpt, torch.Tensor): output = inpt else: raise TypeError( f"Input can either be a pure Tensor, a numpy array, or a PIL image, but got {type(inpt)} instead." ) return tv_tensors.Image(output) to_pil_image = _F.to_pil_image pil_to_tensor = _F.pil_to_tensor
from typing import Union

import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import functional as _F


@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image:
    """See :class:`~torchvision.transforms.v2.ToImage` for details."""
    if isinstance(inpt, np.ndarray):
        # FIX: np.atleast_3d promotes 2-D (H, W) grayscale arrays to
        # (H, W, 1); without it the HWC -> CHW .permute((2, 0, 1)) below
        # raises on 2-D inputs because there is no third dimension.
        output = torch.from_numpy(np.atleast_3d(inpt)).permute((2, 0, 1)).contiguous()
    elif isinstance(inpt, PIL.Image.Image):
        output = pil_to_tensor(inpt)
    elif isinstance(inpt, torch.Tensor):
        output = inpt
    else:
        raise TypeError(
            f"Input can either be a pure Tensor, a numpy array, or a PIL image, but got {type(inpt)} instead."
        )
    return tv_tensors.Image(output)


to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# Mask R-CNN R-50 FPN 1x COCO with Albumentations-based data augmentation.
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'

# Albumentations transforms, applied via mmdet's `Albu` wrapper below.
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0625,
        scale_limit=0.0,
        rotate_limit=0,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.1, 0.3],
        contrast_limit=[0.1, 0.3],
        p=0.2),
    # Pick exactly one color-shift transform, 10% of the time.
    dict(
        type='OneOf',
        transforms=[
            dict(
                type='RGBShift',
                r_shift_limit=10,
                g_shift_limit=10,
                b_shift_limit=10,
                p=1.0),
            dict(
                type='HueSaturationValue',
                hue_shift_limit=20,
                sat_shift_limit=30,
                val_shift_limit=20,
                p=1.0)
        ],
        p=0.1),
    # NOTE(review): `JpegCompression` with quality_lower/quality_upper is
    # deprecated in newer Albumentations releases (replaced by
    # `ImageCompression`) — confirm the pinned albumentations version.
    dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
    dict(type='ChannelShuffle', p=0.1),
    # Pick exactly one blur transform, 10% of the time.
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
            min_visibility=0.0,
            # Drop boxes whose instances are lost after augmentation.
            filter_lost_elements=True),
        # Translate mmdet data keys to the names Albumentations expects.
        keymap={
            'img': 'image',
            'gt_masks': 'masks',
            'gt_bboxes': 'bboxes'
        },
        skip_img_without_anno=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# Mask R-CNN R-50 FPN 1x COCO with Albumentations-based data augmentation.
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'

# Albumentations transforms, applied via mmdet's `Albu` wrapper below.
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0625,
        scale_limit=0.0,
        rotate_limit=0,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.1, 0.3],
        contrast_limit=[0.1, 0.3],
        p=0.2),
    # Pick exactly one color-shift transform, 10% of the time.
    dict(
        type='OneOf',
        transforms=[
            dict(
                type='RGBShift',
                r_shift_limit=10,
                g_shift_limit=10,
                b_shift_limit=10,
                p=1.0),
            dict(
                type='HueSaturationValue',
                hue_shift_limit=20,
                sat_shift_limit=30,
                val_shift_limit=20,
                p=1.0)
        ],
        p=0.1),
    # NOTE(review): `JpegCompression` with quality_lower/quality_upper is
    # deprecated in newer Albumentations releases (replaced by
    # `ImageCompression`) — confirm the pinned albumentations version.
    dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
    dict(type='ChannelShuffle', p=0.1),
    # Pick exactly one blur transform, 10% of the time.
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
            min_visibility=0.0,
            # Drop boxes whose instances are lost after augmentation.
            filter_lost_elements=True),
        # Translate mmdet data keys to the names Albumentations expects.
        keymap={
            'img': 'image',
            'gt_masks': 'masks',
            'gt_bboxes': 'bboxes'
        },
        skip_img_without_anno=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# Cascade Mask R-CNN X-101-32x4d FPN 1x, with SyncBN in the backbone.
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        # Synchronize BatchNorm statistics across GPUs and keep BN layers in
        # training mode (norm_eval=False) so their statistics are updated.
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
# Cascade Mask R-CNN X-101-32x4d FPN 1x, with SyncBN in the backbone.
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        # Synchronize BatchNorm statistics across GPUs and keep BN layers in
        # training mode (norm_eval=False) so their statistics are updated.
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
# Copyright (c) OpenMMLab. All rights reserved.
"""Public surface of mmdet's layer/brick package: re-exports every building
block (NMS ops, decoders, positional encodings, transformer layers) and pins
the public API via ``__all__``."""
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
                                  SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer

# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
                          ConditionalAttention,
                          ConditionalDetrTransformerDecoder,
                          ConditionalDetrTransformerDecoderLayer,
                          DABDetrTransformerDecoder,
                          DABDetrTransformerDecoderLayer,
                          DABDetrTransformerEncoder,
                          DeformableDetrTransformerDecoder,
                          DeformableDetrTransformerDecoderLayer,
                          DeformableDetrTransformerEncoder,
                          DeformableDetrTransformerEncoderLayer,
                          DetrTransformerDecoder, DetrTransformerDecoderLayer,
                          DetrTransformerEncoder, DetrTransformerEncoderLayer,
                          DinoTransformerDecoder, DynamicConv,
                          Mask2FormerTransformerDecoder,
                          Mask2FormerTransformerDecoderLayer,
                          Mask2FormerTransformerEncoder, PatchEmbed,
                          PatchMerging, coordinate_to_encoding,
                          inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)

# yapf: enable

__all__ = [
    'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
    'PixelDecoder', 'TransformerEncoderPixelDecoder',
    'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
    'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
    'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
    'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
    'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
    'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
    'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
    'DetrTransformerEncoder', 'DetrTransformerDecoder',
    'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
    'DeformableDetrTransformerEncoderLayer',
    'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
    'coordinate_to_encoding', 'ConditionalAttention',
    'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
    'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
    'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
    'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
    'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
# Copyright (c) OpenMMLab. All rights reserved. from .activations import SiLU from .bbox_nms import fast_nms, multiclass_nms from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .dropblock import DropBlock from .ema import ExpMomentumEMA from .inverted_residual import InvertedResidual from .matrix_nms import mask_matrix_nms from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder from .normed_predictor import NormedConv2d, NormedLinear from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder from .positional_encoding import (LearnedPositionalEncoding, SinePositionalEncoding) from .res_layer import ResLayer, SimplifiedBasicBlock from .se_layer import ChannelAttention, DyReLU, SELayer from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DynamicConv, PatchEmbed, PatchMerging, Transformer, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw) __all__ = [ 'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder', 'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', 'PatchMerging', 'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual', 'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU', 'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU' ]
"""Prototype model factories: Conformer wav2vec2, Emformer-HuBERT,
ConvEmformer, HiFi-GAN vocoders, Conformer RNN-T and SQUIM objective models."""
from ._conformer_wav2vec2 import (
    conformer_wav2vec2_base,
    conformer_wav2vec2_model,
    conformer_wav2vec2_pretrain_base,
    conformer_wav2vec2_pretrain_large,
    conformer_wav2vec2_pretrain_model,
    ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
from .squim import squim_objective_base, squim_objective_model, SquimObjective

__all__ = [
    "conformer_rnnt_base",
    "conformer_rnnt_model",
    "ConvEmformer",
    "conformer_wav2vec2_model",
    "conformer_wav2vec2_base",
    "conformer_wav2vec2_pretrain_model",
    "conformer_wav2vec2_pretrain_base",
    "conformer_wav2vec2_pretrain_large",
    "ConformerWav2Vec2PretrainModel",
    "emformer_hubert_base",
    "emformer_hubert_model",
    "HiFiGANVocoder",
    "hifigan_vocoder_v1",
    "hifigan_vocoder_v2",
    "hifigan_vocoder_v3",
    "hifigan_vocoder",
    "squim_objective_base",
    "squim_objective_model",
    "SquimObjective",
]
"""Prototype model factories: Conformer wav2vec2, Emformer-HuBERT,
ConvEmformer, HiFi-GAN vocoders, Conformer RNN-T and SQUIM objective models."""
from ._conformer_wav2vec2 import (
    conformer_wav2vec2_base,
    conformer_wav2vec2_model,
    conformer_wav2vec2_pretrain_base,
    conformer_wav2vec2_pretrain_large,
    conformer_wav2vec2_pretrain_model,
    ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
from .squim import SQUIM_OBJECTIVE, squim_objective_base, squim_objective_model

__all__ = [
    "conformer_rnnt_base",
    "conformer_rnnt_model",
    "ConvEmformer",
    "conformer_wav2vec2_model",
    "conformer_wav2vec2_base",
    "conformer_wav2vec2_pretrain_model",
    "conformer_wav2vec2_pretrain_base",
    "conformer_wav2vec2_pretrain_large",
    "ConformerWav2Vec2PretrainModel",
    "emformer_hubert_base",
    "emformer_hubert_model",
    "HiFiGANVocoder",
    "hifigan_vocoder_v1",
    "hifigan_vocoder_v2",
    "hifigan_vocoder_v3",
    "hifigan_vocoder",
    "squim_objective_base",
    "squim_objective_model",
    "SQUIM_OBJECTIVE",
]
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
import subprocess
from pathlib import Path
from typing import Dict

import numpy as np
import pytest
from jina import Document, DocumentArray
from PIL import Image


def _blob_doc() -> Document:
    """Build one Document carrying a constant 3x10x10 float32 blob."""
    return Document(blob=np.ones((3, 10, 10), dtype=np.float32))


@pytest.fixture()
def test_dir() -> str:
    """Absolute path of the directory containing this conftest."""
    return os.path.dirname(os.path.abspath(__file__))


@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
    """Ten documents, each with a blob at the top level."""
    return DocumentArray([_blob_doc() for _ in range(10)])


@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
    """Ten documents, each holding the blob inside a single chunk."""
    return DocumentArray([Document(chunks=[_blob_doc()]) for _ in range(10)])


@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
    """One document whose single chunk contains ten blob chunks."""
    inner_chunks = [_blob_doc() for _ in range(10)]
    return DocumentArray([Document(chunks=[Document(chunks=inner_chunks)])])


@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
    """Map of image name -> float32 RGB array scaled to [0, 1]."""
    images: Dict[str, np.ndarray] = {}
    for name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']:
        path = os.path.join(test_dir, 'test_data', name + '.png')
        # Keep only the first three channels (drop alpha if present).
        rgb = np.array(Image.open(path), dtype=np.float32)[:, :, 0:3]
        images[name] = rgb / 255
    return images


@pytest.fixture(scope='session')
def docker_image_name() -> str:
    """Docker image name derived from the executor's directory name."""
    return Path(__file__).parents[1].stem.lower()


@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
    """Build the CPU docker image once per test session and return its name."""
    subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
    return docker_image_name


@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
    """Build the GPU docker image once per test session and return its name."""
    image_name = f'{docker_image_name}:gpu'
    subprocess.run(
        ['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
    )
    return image_name
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
from typing import Dict

import numpy as np
import pytest
from PIL import Image
from jina import DocumentArray, Document


@pytest.fixture()
def test_dir() -> str:
    """Absolute path of the directory containing this conftest."""
    return os.path.dirname(os.path.abspath(__file__))


@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
    """Ten documents, each with a 3x10x10 float32 blob at the top level."""
    return DocumentArray([
        Document(blob=np.ones((3, 10, 10), dtype=np.float32)) for _ in range(10)
    ])


@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
    """Ten documents, each holding the blob inside a single chunk."""
    return DocumentArray([
        Document(
            chunks=[Document(blob=np.ones((3, 10, 10), dtype=np.float32))])
        for _ in range(10)
    ])


@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
    """One document whose single chunk contains ten blob chunks."""
    return DocumentArray([
        Document(
            chunks=[Document(
                chunks=[Document(blob=np.ones((3, 10, 10), dtype=np.float32))
                        for _ in range(10)])])
    ])


@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
    """Map of image name -> float32 RGB array scaled to [0, 1].

    Only the first three channels are kept (drops alpha if present).
    """

    def get_path(file_name_no_suffix: str) -> str:
        return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')

    return {
        file_name: np.array(Image.open(get_path(file_name)), dtype=np.float32)[:, :, 0:3]
        / 255
        for file_name in [
            'airplane', 'banana1', 'banana2', 'satellite', 'studio'
        ]
    }
from dataclasses import dataclass, field
from typing import Union

from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum


class BatchSamplers(ExplicitEnum):
    """
    Stores the acceptable string identifiers for batch samplers.

    The batch sampler is responsible for determining how samples are grouped into batches during training.
    Valid options are:

    - ``BatchSamplers.BATCH_SAMPLER``: The default PyTorch batch sampler.
    - ``BatchSamplers.NO_DUPLICATES``: Ensures no duplicate samples in a batch.
    - ``BatchSamplers.GROUP_BY_LABEL``: Ensures each batch has 2+ samples from the same label.
    """

    BATCH_SAMPLER = "batch_sampler"
    NO_DUPLICATES = "no_duplicates"
    GROUP_BY_LABEL = "group_by_label"


class MultiDatasetBatchSamplers(ExplicitEnum):
    """
    Stores the acceptable string identifiers for multi-dataset batch samplers.

    The multi-dataset batch sampler is responsible for determining in what order batches are sampled from multiple
    datasets during training. Valid options are:

    - ``MultiDatasetBatchSamplers.ROUND_ROBIN``: Round-robin sampling from each dataset until one is exhausted.
      With this strategy, it's likely that not all samples from each dataset are used, but each dataset is sampled
      from equally.
    - ``MultiDatasetBatchSamplers.PROPORTIONAL``: Sample from each dataset in proportion to its size [default].
      With this strategy, all samples from each dataset are used and larger datasets are sampled from more frequently.
    """

    ROUND_ROBIN = "round_robin"  # Round-robin sampling from each dataset
    PROPORTIONAL = "proportional"  # Sample from each dataset in proportion to its size [default]


@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
    """
    SentenceTransformerTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
    specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
    available arguments.

    Args:
        output_dir (`str`):
            The output directory where the model checkpoints will be written.
        batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
            The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
            Defaults to ``BatchSamplers.BATCH_SAMPLER``.
        multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
            The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
            for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
    """

    batch_sampler: Union[BatchSamplers, str] = field(
        default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
    )
    multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
        default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
    )

    def __post_init__(self):
        super().__post_init__()

        # Coerce plain-string values (e.g. from a CLI or config file) into the enums.
        self.batch_sampler = BatchSamplers(self.batch_sampler)
        self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)

        # The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
        # so we set `prediction_loss_only` to `True` here to avoid the evaluation loop expecting logits/labels that
        # the trainer never produces.
        self.prediction_loss_only = True

        # Disable broadcasting of buffers to avoid `RuntimeError: one of the variables needed for gradient computation
        # has been modified by an inplace operation.` when training with DDP & a BertModel-based model.
        self.ddp_broadcast_buffers = False
from dataclasses import dataclass, field
from typing import Union

from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum


class BatchSamplers(ExplicitEnum):
    """
    Stores the acceptable string identifiers for batch samplers.

    The batch sampler is responsible for determining how samples are grouped into batches during training.
    Valid options are:

    - ``BatchSamplers.BATCH_SAMPLER``: The default PyTorch batch sampler.
    - ``BatchSamplers.NO_DUPLICATES``: Ensures no duplicate samples in a batch.
    - ``BatchSamplers.GROUP_BY_LABEL``: Ensures each batch has 2+ samples from the same label.
    """

    BATCH_SAMPLER = "batch_sampler"
    NO_DUPLICATES = "no_duplicates"
    GROUP_BY_LABEL = "group_by_label"


class MultiDatasetBatchSamplers(ExplicitEnum):
    """
    Stores the acceptable string identifiers for multi-dataset batch samplers.

    The multi-dataset batch sampler is responsible for determining in what order batches are sampled from multiple
    datasets during training. Valid options are:

    - ``MultiDatasetBatchSamplers.ROUND_ROBIN``: Round-robin sampling from each dataset until one is exhausted.
      With this strategy, it's likely that not all samples from each dataset are used, but each dataset is sampled
      from equally.
    - ``MultiDatasetBatchSamplers.PROPORTIONAL``: Sample from each dataset in proportion to its size [default].
      With this strategy, all samples from each dataset are used and larger datasets are sampled from more frequently.
    """

    ROUND_ROBIN = "round_robin"  # Round-robin sampling from each dataset
    PROPORTIONAL = "proportional"  # Sample from each dataset in proportion to its size [default]


@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
    """
    SentenceTransformerTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
    specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
    available arguments.

    Args:
        output_dir (`str`):
            The output directory where the model checkpoints will be written.
        batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
            The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
            Defaults to ``BatchSamplers.BATCH_SAMPLER``.
        multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
            The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
            for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
    """

    batch_sampler: Union[BatchSamplers, str] = field(
        default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
    )
    multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
        default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
    )

    def __post_init__(self):
        super().__post_init__()

        # Coerce plain-string values (e.g. from a CLI or config file) into the enums.
        self.batch_sampler = BatchSamplers(self.batch_sampler)
        self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)

        # The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
        # so we set `prediction_loss_only` to `True` here to avoid the evaluation loop expecting logits/labels that
        # the trainer never produces.
        self.prediction_loss_only = True
"""Backwards-compatible import shim: ``Neo4jVector`` and ``SearchType`` now
live in ``langchain_community``; accessing them here emits a deprecation
warning via :func:`create_importer`."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.vectorstores import Neo4jVector
    from langchain_community.vectorstores.neo4j_vector import SearchType

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "SearchType": "langchain_community.vectorstores.neo4j_vector",
    "Neo4jVector": "langchain_community.vectorstores",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "Neo4jVector",
    "SearchType",
]
"""Backwards-compatible import shim: ``Neo4jVector`` and ``SearchType`` now
live in ``langchain_community``; accessing them here emits a deprecation
warning via :func:`create_importer`."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.vectorstores import Neo4jVector
    from langchain_community.vectorstores.neo4j_vector import SearchType

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "SearchType": "langchain_community.vectorstores.neo4j_vector",
    "Neo4jVector": "langchain_community.vectorstores",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "SearchType",
    "Neo4jVector",
]
# ATSS detector with a Swin-L backbone and a DyHead neck on COCO.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
model = dict(
    type='ATSS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=128),
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=12,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameter will not be used
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=[
        dict(
            type='FPN',
            in_channels=[384, 768, 1536],
            out_channels=256,
            start_level=0,
            add_extra_convs='on_output',
            num_outs=5),
        dict(
            type='DyHead',
            in_channels=256,
            out_channels=256,
            num_blocks=6,
            # disable zero_init_offset to follow official implementation
            zero_init_offset=False)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        pred_kernel_size=1,  # follow DyHead official implementation
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128],
            center_offset=0.5),  # follow DyHead official implementation
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    # Multi-scale training between (2000, 480) and (2000, 1200).
    dict(
        type='RandomResize',
        scale=[(2000, 480), (2000, 1200)],
        keep_ratio=True,
        backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(2000, 1200), keep_ratio=True, backend='pillow'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# Repeat the train set twice per epoch (RepeatDataset, times=2).
train_dataloader = dict(
    dataset=dict(
        _delete_=True,
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type={{_base_.dataset_type}},
            data_root={{_base_.data_root}},
            ann_file='annotations/instances_train2017.json',
            data_prefix=dict(img='train2017/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args={{_base_.backend_args}})))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# optimizer: AdamW with zero weight decay on positional tables and norms.
optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05),
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }),
    clip_grad=None)
# ATSS detector with a Swin-L backbone and a DyHead neck on COCO.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
model = dict(
    type='ATSS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=128),
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=12,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameter will not be used
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=[
        dict(
            type='FPN',
            in_channels=[384, 768, 1536],
            out_channels=256,
            start_level=0,
            add_extra_convs='on_output',
            num_outs=5),
        dict(
            type='DyHead',
            in_channels=256,
            out_channels=256,
            num_blocks=6,
            # disable zero_init_offset to follow official implementation
            zero_init_offset=False)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        pred_kernel_size=1,  # follow DyHead official implementation
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128],
            center_offset=0.5),  # follow DyHead official implementation
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# dataset settings
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    # Multi-scale training between (2000, 480) and (2000, 1200).
    dict(
        type='RandomResize',
        scale=[(2000, 480), (2000, 1200)],
        keep_ratio=True,
        backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(type='Resize', scale=(2000, 1200), keep_ratio=True, backend='pillow'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# Repeat the train set twice per epoch (RepeatDataset, times=2).
train_dataloader = dict(
    dataset=dict(
        _delete_=True,
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type={{_base_.dataset_type}},
            data_root={{_base_.data_root}},
            ann_file='annotations/instances_train2017.json',
            data_prefix=dict(img='train2017/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# optimizer: AdamW with zero weight decay on positional tables and norms.
optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05),
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }),
    clip_grad=None)
"""Example selectors. **Example selector** implements logic for selecting examples to include them in prompts. This allows us to select examples that are most relevant to the input. """ from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.example_selectors.base import BaseExampleSelector from langchain_core.example_selectors.length_based import ( LengthBasedExampleSelector, ) from langchain_core.example_selectors.semantic_similarity import ( MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector, sorted_values, ) __all__ = [ "BaseExampleSelector", "LengthBasedExampleSelector", "MaxMarginalRelevanceExampleSelector", "SemanticSimilarityExampleSelector", "sorted_values", ] _dynamic_imports = { "BaseExampleSelector": "base", "LengthBasedExampleSelector": "length_based", "MaxMarginalRelevanceExampleSelector": "semantic_similarity", "SemanticSimilarityExampleSelector": "semantic_similarity", "sorted_values": "semantic_similarity", } def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) package = __spec__.parent # type: ignore[name-defined] if module_name == "__module__" or module_name is None: result = import_module(f".{attr_name}", package=package) else: module = import_module(f".{module_name}", package=package) result = getattr(module, attr_name) globals()[attr_name] = result return result def __dir__() -> list[str]: return list(__all__)
"""Example selectors. **Example selector** implements logic for selecting examples to include them in prompts. This allows us to select examples that are most relevant to the input. """ from langchain_core.example_selectors.base import BaseExampleSelector from langchain_core.example_selectors.length_based import ( LengthBasedExampleSelector, ) from langchain_core.example_selectors.semantic_similarity import ( MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector, sorted_values, ) __all__ = [ "BaseExampleSelector", "LengthBasedExampleSelector", "MaxMarginalRelevanceExampleSelector", "SemanticSimilarityExampleSelector", "sorted_values", ]
"""Argparser module for Pod runtimes""" import argparse from jina import helper from jina.enums import PodRoleType from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group def mixin_pod_parser(parser, port_monitoring=True): """Mixing in arguments required by :class:`Pod` into the given parser. :param parser: the parser instance to which we add arguments :param port_monitoring: if to include the port parsing """ gp = add_arg_group(parser, title='Pod') gp.add_argument( '--runtime-cls', type=str, default='WorkerRuntime', help='The runtime class to run inside the Pod', ) gp.add_argument( '--timeout-ready', type=int, default=600000, help='The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting ' 'forever', ) gp.add_argument( '--env', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help='The map of environment variables that are available inside runtime', ) # hidden CLI used for internal only gp.add_argument( '--shard-id', type=int, default=0, help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--pod-role', type=PodRoleType.from_string, choices=list(PodRoleType), default=PodRoleType.WORKER, help='The role of this Pod in a Deployment' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--noblock-on-start', action='store_true', default=False, help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on ' '`wait_start_success` at outer function for the postpone check.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. 
For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of replicas in the deployment', ) gp.add_argument( '--port', type=int, default=helper.random_port(), help='The port for input data to bind to, default is a random port between [49152, 65535]', ) gp.add_argument( '--monitoring', action='store_true', default=False, help='If set, spawn an http server with a prometheus endpoint to expose metrics', ) if port_monitoring: gp.add_argument( '--port-monitoring', type=int, default=helper.random_port(), dest='port_monitoring', help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]', ) gp.add_argument( '--retries', type=int, default=-1, dest='retries', help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)', ) gp.add_argument( '--floating', action='store_true', default=False, help='If set, the current Pod/Deployment can not be further chained, ' 'and the next `.add()` will chain after the last Pod/Deployment not this current one.', )
"""Argparser module for Pod runtimes""" import argparse from jina import helper from jina.enums import PodRoleType from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group def mixin_pod_parser(parser): """Mixing in arguments required by :class:`Pod` into the given parser. :param parser: the parser instance to which we add arguments """ gp = add_arg_group(parser, title='Pod') gp.add_argument( '--runtime-cls', type=str, default='WorkerRuntime', help='The runtime class to run inside the Pod', ) gp.add_argument( '--timeout-ready', type=int, default=600000, help='The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting ' 'forever', ) gp.add_argument( '--env', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help='The map of environment variables that are available inside runtime', ) # hidden CLI used for internal only gp.add_argument( '--shard-id', type=int, default=0, help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--pod-role', type=PodRoleType.from_string, choices=list(PodRoleType), default=PodRoleType.WORKER, help='The role of this Pod in a Deployment' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--noblock-on-start', action='store_true', default=False, help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on ' '`wait_start_success` at outer function for the postpone check.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. 
For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of replicas in the deployment', ) gp.add_argument( '--port', type=int, default=helper.random_port(), help='The port for input data to bind to, default is a random port between [49152, 65535]', ) gp.add_argument( '--monitoring', action='store_true', default=False, help='If set, spawn an http server with a prometheus endpoint to expose metrics', ) gp.add_argument( '--port-monitoring', type=int, default=helper.random_port(), # default prometheus server port dest='port_monitoring', help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]', ) gp.add_argument( '--retries', type=int, default=-1, dest='retries', help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)', ) gp.add_argument( '--floating', action='store_true', default=False, help='If set, the current Pod/Deployment can not be further chained, ' 'and the next `.add()` will chain after the last Pod/Deployment not this current one.', )
# Copyright (c) OpenMMLab. All rights reserved. from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, get_classes, imagenet_det_classes, imagenet_vid_classes, voc_classes) from .eval_hooks import DistEvalHook, EvalHook from .mean_ap import average_precision, eval_map, print_map_summary from .panoptic_utils import INSTANCE_OFFSET from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, print_recall_summary) __all__ = [ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall', 'INSTANCE_OFFSET' ]
# Copyright (c) OpenMMLab. All rights reserved. from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, get_classes, imagenet_det_classes, imagenet_vid_classes, voc_classes) from .eval_hooks import DistEvalHook, EvalHook from .mean_ap import average_precision, eval_map, print_map_summary from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, print_recall_summary) __all__ = [ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall' ]
from typing import Any, List, Tuple, Type, TypeVar, Union

import numpy as np

from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin

T = TypeVar('T', bound='VideoNdArray')


@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
    """
    Subclass of [`NdArray`][docarray.typing.NdArray], to represent a video tensor.
    Adds video-specific features to the tensor.

    ---

    ```python
    from typing import Optional

    import numpy as np
    from pydantic import parse_obj_as

    from docarray import BaseDoc
    from docarray.typing import VideoNdArray, VideoUrl


    class MyVideoDoc(BaseDoc):
        title: str
        url: Optional[VideoUrl] = None
        video_tensor: Optional[VideoNdArray] = None


    doc_1 = MyVideoDoc(
        title='my_first_video_doc',
        video_tensor=np.random.random((100, 224, 224, 3)),
    )

    doc_2 = MyVideoDoc(
        title='my_second_video_doc',
        url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
    )

    doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
    # doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
    ```

    ---
    """

    @classmethod
    def _docarray_validate(
        cls: Type[T],
        value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
    ) -> T:
        """Coerce ``value`` into a tensor via NdArray validation, then enforce
        the video-specific shape constraint from ``VideoTensorMixin``."""
        tensor = super()._docarray_validate(value=value)
        return cls.validate_shape(value=tensor)
from typing import Any, List, Tuple, Type, TypeVar, Union

import numpy as np

from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin

T = TypeVar('T', bound='VideoNdArray')


@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
    """
    Subclass of [`NdArray`][docarray.typing.NdArray], to represent a video tensor.
    Adds video-specific features to the tensor.

    ---

    ```python
    from typing import Optional

    import numpy as np
    from pydantic import parse_obj_as

    from docarray import BaseDoc
    from docarray.typing import VideoNdArray, VideoUrl


    class MyVideoDoc(BaseDoc):
        title: str
        url: Optional[VideoUrl]
        video_tensor: Optional[VideoNdArray]


    doc_1 = MyVideoDoc(
        title='my_first_video_doc',
        video_tensor=np.random.random((100, 224, 224, 3)),
    )

    doc_2 = MyVideoDoc(
        title='my_second_video_doc',
        url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
    )

    doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
    # doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
    ```

    ---
    """

    @classmethod
    def _docarray_validate(
        cls: Type[T],
        value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
    ) -> T:
        """Coerce ``value`` into a tensor via NdArray validation, then enforce
        the video-specific shape constraint from ``VideoTensorMixin``."""
        tensor = super()._docarray_validate(value=value)
        return cls.validate_shape(value=tensor)
from typing import Dict

import torch.nn.functional as F
from torch import Tensor, nn


class Normalize(nn.Module):
    """Scales each sentence embedding to unit L2 norm.

    Stateless module: it has no parameters and nothing to serialize.
    """

    def __init__(self) -> None:
        super(Normalize, self).__init__()

    def forward(self, features: Dict[str, Tensor]) -> Dict[str, Tensor]:
        """Replace ``features['sentence_embedding']`` with its row-wise
        L2-normalized version and return the (mutated) feature dict."""
        embeddings = features["sentence_embedding"]
        features["sentence_embedding"] = F.normalize(embeddings, p=2, dim=1)
        return features

    def save(self, output_path) -> None:
        # Nothing to persist for a stateless layer.
        pass

    @staticmethod
    def load(input_path) -> "Normalize":
        # The path is ignored: any saved Normalize is identical to a fresh one.
        return Normalize()
from typing import Dict

import torch.nn.functional as F
from torch import Tensor, nn


class Normalize(nn.Module):
    """This layer normalizes embeddings to unit length"""

    def __init__(self) -> None:
        super(Normalize, self).__init__()

    def forward(self, features: Dict[str, Tensor]) -> Dict[str, Tensor]:
        """L2-normalize ``features['sentence_embedding']`` row-wise in place
        and return the feature dict."""
        features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
        return features

    def save(self, output_path) -> None:
        # Stateless layer: nothing to persist.
        pass

    @staticmethod
    def load(input_path) -> "Normalize":
        # The path is ignored: any saved Normalize is identical to a fresh one.
        return Normalize()
# Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .data_structures import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .hook import * # noqa: F401, F403 from .mask import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .utils import * # noqa: F401, F403 from .visualization import * # noqa: F401, F403
# Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .data_structures import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .hook import * # noqa: F401, F403 from .mask import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .utils import * # noqa: F401, F403
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.losses import Reduction from keras.src.losses import deserialize from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss from keras.src.losses.losses import CTC from keras.src.losses.losses import BinaryCrossentropy from keras.src.losses.losses import BinaryFocalCrossentropy from keras.src.losses.losses import CategoricalCrossentropy from keras.src.losses.losses import CategoricalFocalCrossentropy from keras.src.losses.losses import CategoricalHinge from keras.src.losses.losses import Circle from keras.src.losses.losses import CosineSimilarity from keras.src.losses.losses import Dice from keras.src.losses.losses import Hinge from keras.src.losses.losses import Huber from keras.src.losses.losses import KLDivergence from keras.src.losses.losses import LogCosh from keras.src.losses.losses import MeanAbsoluteError from keras.src.losses.losses import MeanAbsolutePercentageError from keras.src.losses.losses import MeanSquaredError from keras.src.losses.losses import MeanSquaredLogarithmicError from keras.src.losses.losses import Poisson from keras.src.losses.losses import SparseCategoricalCrossentropy from keras.src.losses.losses import SquaredHinge from keras.src.losses.losses import Tversky from keras.src.losses.losses import binary_crossentropy from keras.src.losses.losses import binary_focal_crossentropy from keras.src.losses.losses import categorical_crossentropy from keras.src.losses.losses import categorical_focal_crossentropy from keras.src.losses.losses import categorical_hinge from keras.src.losses.losses import circle from keras.src.losses.losses import cosine_similarity from keras.src.losses.losses import ctc from keras.src.losses.losses import dice from keras.src.losses.losses import hinge from keras.src.losses.losses import huber from keras.src.losses.losses 
import kl_divergence as KLD from keras.src.losses.losses import kl_divergence as kld from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence from keras.src.losses.losses import log_cosh as logcosh from keras.src.losses.losses import mean_absolute_error as MAE from keras.src.losses.losses import mean_absolute_error as mae from keras.src.losses.losses import mean_absolute_percentage_error as MAPE from keras.src.losses.losses import mean_absolute_percentage_error as mape from keras.src.losses.losses import mean_squared_error as MSE from keras.src.losses.losses import mean_squared_error as mse from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE from keras.src.losses.losses import mean_squared_logarithmic_error as msle from keras.src.losses.losses import poisson from keras.src.losses.losses import sparse_categorical_crossentropy from keras.src.losses.losses import squared_hinge from keras.src.losses.losses import tversky
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.losses import Reduction from keras.src.losses import deserialize from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss from keras.src.losses.losses import CTC from keras.src.losses.losses import BinaryCrossentropy from keras.src.losses.losses import BinaryFocalCrossentropy from keras.src.losses.losses import CategoricalCrossentropy from keras.src.losses.losses import CategoricalFocalCrossentropy from keras.src.losses.losses import CategoricalHinge from keras.src.losses.losses import CosineSimilarity from keras.src.losses.losses import Dice from keras.src.losses.losses import Hinge from keras.src.losses.losses import Huber from keras.src.losses.losses import KLDivergence from keras.src.losses.losses import LogCosh from keras.src.losses.losses import MeanAbsoluteError from keras.src.losses.losses import MeanAbsolutePercentageError from keras.src.losses.losses import MeanSquaredError from keras.src.losses.losses import MeanSquaredLogarithmicError from keras.src.losses.losses import Poisson from keras.src.losses.losses import SparseCategoricalCrossentropy from keras.src.losses.losses import SquaredHinge from keras.src.losses.losses import Tversky from keras.src.losses.losses import binary_crossentropy from keras.src.losses.losses import binary_focal_crossentropy from keras.src.losses.losses import categorical_crossentropy from keras.src.losses.losses import categorical_focal_crossentropy from keras.src.losses.losses import categorical_hinge from keras.src.losses.losses import cosine_similarity from keras.src.losses.losses import ctc from keras.src.losses.losses import dice from keras.src.losses.losses import hinge from keras.src.losses.losses import huber from keras.src.losses.losses import kl_divergence as KLD from keras.src.losses.losses import kl_divergence as kld from 
keras.src.losses.losses import kl_divergence as kullback_leibler_divergence from keras.src.losses.losses import log_cosh as logcosh from keras.src.losses.losses import mean_absolute_error as MAE from keras.src.losses.losses import mean_absolute_error as mae from keras.src.losses.losses import mean_absolute_percentage_error as MAPE from keras.src.losses.losses import mean_absolute_percentage_error as mape from keras.src.losses.losses import mean_squared_error as MSE from keras.src.losses.losses import mean_squared_error as mse from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE from keras.src.losses.losses import mean_squared_logarithmic_error as msle from keras.src.losses.losses import poisson from keras.src.losses.losses import sparse_categorical_crossentropy from keras.src.losses.losses import squared_hinge from keras.src.losses.losses import tversky
_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# Swap the base config's 3.2GF backbone for RegNetX-12GF; the FPN input
# channels must match the per-stage output widths of regnetx_12gf.
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_12gf',  # RegNetX-12GF variant
        out_indices=(0, 1, 2, 3),  # take feature maps from all four stages
        frozen_stages=1,  # freeze stem + first stage
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,  # keep BN running stats fixed during training
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')),
    neck=dict(
        type='FPN',
        in_channels=[224, 448, 896, 2240],  # stage widths of regnetx_12gf
        out_channels=256,
        num_outs=5))
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# Swap the base config's 3.2GF backbone for RegNetX-12GF; the FPN input
# channels must match the per-stage output widths of regnetx_12gf.
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_12gf',  # RegNetX-12GF variant
        out_indices=(0, 1, 2, 3),  # take feature maps from all four stages
        frozen_stages=1,  # freeze stem + first stage
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,  # keep BN running stats fixed during training
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')),
    neck=dict(
        type='FPN',
        in_channels=[224, 448, 896, 2240],  # stage widths of regnetx_12gf
        out_channels=256,
        num_outs=5))
import urllib.parse
from typing import ClassVar, Optional

from backend.data.model import OAuth2Credentials, ProviderName
from backend.integrations.oauth.base import BaseOAuthHandler
from backend.util.request import Requests


class TodoistOAuthHandler(BaseOAuthHandler):
    """OAuth2 handler for Todoist (authorization-code flow, no refresh tokens)."""

    PROVIDER_NAME = ProviderName.TODOIST
    # Scopes requested on every login; the ``scopes`` argument of
    # get_login_url is not consulted (see NOTE there).
    DEFAULT_SCOPES: ClassVar[list[str]] = [
        "task:add",
        "data:read",
        "data:read_write",
        "data:delete",
        "project:delete",
    ]

    AUTHORIZE_URL = "https://todoist.com/oauth/authorize"
    TOKEN_URL = "https://todoist.com/oauth/access_token"

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        # Values from the Todoist app registration.
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        """Build the URL the user is redirected to for authorization.

        NOTE(review): ``scopes`` and ``code_challenge`` are ignored — the
        class-level DEFAULT_SCOPES is always sent and PKCE is not used.
        ``redirect_uri`` is also omitted from the query string; presumably
        Todoist falls back to the redirect registered with the app — confirm.
        """
        params = {
            "client_id": self.client_id,
            "scope": ",".join(self.DEFAULT_SCOPES),
            "state": state,
        }
        return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        """Exchange authorization code for access tokens"""
        data = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "code": code,
            "redirect_uri": self.redirect_uri,
        }

        response = Requests().post(self.TOKEN_URL, data=data)
        response.raise_for_status()
        tokens = response.json()

        # Fetch the user resource from the Sync API so the stored credentials
        # can be labelled with the account's email address.
        response = Requests().post(
            "https://api.todoist.com/sync/v9/sync",
            headers={"Authorization": f"Bearer {tokens['access_token']}"},
            data={"sync_token": "*", "resource_types": '["user"]'},
        )
        response.raise_for_status()
        user_info = response.json()
        user_email = user_info["user"].get("email")

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=None,
            username=user_email,
            access_token=tokens["access_token"],
            refresh_token=None,  # Todoist issues long-lived tokens only
            access_token_expires_at=None,
            refresh_token_expires_at=None,
            scopes=scopes,
        )

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        # Todoist does not support token refresh
        return credentials

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        # No revocation endpoint is called; report that nothing was revoked.
        return False
import urllib.parse
from typing import ClassVar, Optional

import requests

from backend.data.model import OAuth2Credentials, ProviderName
from backend.integrations.oauth.base import BaseOAuthHandler


class TodoistOAuthHandler(BaseOAuthHandler):
    """OAuth2 handler for Todoist (authorization-code flow, no refresh tokens)."""

    PROVIDER_NAME = ProviderName.TODOIST
    # Scopes requested on every login; the ``scopes`` argument of
    # get_login_url is not consulted (see NOTE there).
    DEFAULT_SCOPES: ClassVar[list[str]] = [
        "task:add",
        "data:read",
        "data:read_write",
        "data:delete",
        "project:delete",
    ]

    AUTHORIZE_URL = "https://todoist.com/oauth/authorize"
    TOKEN_URL = "https://todoist.com/oauth/access_token"

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        # Values from the Todoist app registration.
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        """Build the URL the user is redirected to for authorization.

        NOTE(review): ``scopes`` and ``code_challenge`` are ignored — the
        class-level DEFAULT_SCOPES is always sent and PKCE is not used.
        ``redirect_uri`` is also omitted from the query string; presumably
        Todoist falls back to the redirect registered with the app — confirm.
        """
        params = {
            "client_id": self.client_id,
            "scope": ",".join(self.DEFAULT_SCOPES),
            "state": state,
        }
        return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        """Exchange authorization code for access tokens"""
        data = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "code": code,
            "redirect_uri": self.redirect_uri,
        }

        response = requests.post(self.TOKEN_URL, data=data)
        response.raise_for_status()
        tokens = response.json()

        # Fetch the user resource from the Sync API so the stored credentials
        # can be labelled with the account's email address.
        response = requests.post(
            "https://api.todoist.com/sync/v9/sync",
            headers={"Authorization": f"Bearer {tokens['access_token']}"},
            data={"sync_token": "*", "resource_types": '["user"]'},
        )
        response.raise_for_status()
        user_info = response.json()
        user_email = user_info["user"].get("email")

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=None,
            username=user_email,
            access_token=tokens["access_token"],
            refresh_token=None,  # Todoist issues long-lived tokens only
            access_token_expires_at=None,
            refresh_token_expires_at=None,
            scopes=scopes,
        )

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        # Todoist does not support token refresh
        return credentials

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        # No revocation endpoint is called; report that nothing was revoked.
        return False
from torchaudio._internal import module_utils as _mod_utils

from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects


if _mod_utils.is_sox_available():
    import atexit

    # Initialize the sox effects chain once at import time and make sure its
    # global state is torn down when the interpreter exits.
    init_sox_effects()
    atexit.register(shutdown_sox_effects)

__all__ = [
    "init_sox_effects",
    "shutdown_sox_effects",
    "effect_names",
    "apply_effects_tensor",
    "apply_effects_file",
]
from torchaudio._internal import module_utils as _mod_utils

from .sox_effects import (
    apply_effects_file,
    apply_effects_tensor,
    effect_names,
    init_sox_effects,
    shutdown_sox_effects,
)


if _mod_utils.is_sox_available():
    import atexit

    # Initialize the sox effects chain once at import time and make sure its
    # global state is torn down when the interpreter exits.
    init_sox_effects()
    atexit.register(shutdown_sox_effects)

__all__ = [
    "init_sox_effects",
    "shutdown_sox_effects",
    "effect_names",
    "apply_effects_tensor",
    "apply_effects_file",
]
import types

from typing_extensions import TYPE_CHECKING

from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.utils._internal.misc import (
    _get_path_from_docarray_root_level,
    import_library,
)

if TYPE_CHECKING:
    from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding  # noqa
    from docarray.typing.tensor.embedding.torch import TorchEmbedding  # noqa

__all__ = ['NdArrayEmbedding', 'AnyEmbedding']


def __getattr__(name: str):
    """Lazily resolve the torch/tensorflow embedding types (PEP 562).

    Defers importing the heavy frameworks until ``TorchEmbedding`` or
    ``TensorFlowEmbedding`` is actually accessed on this module; any other
    name raises ImportError as a normal missing attribute would.
    """
    lib: types.ModuleType
    if name == 'TorchEmbedding':
        import_library('torch', raise_error=True)
        import docarray.typing.tensor.embedding.torch as lib
    elif name == 'TensorFlowEmbedding':
        import_library('tensorflow', raise_error=True)
        import docarray.typing.tensor.embedding.tensorflow as lib
    else:
        raise ImportError(
            f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
        )

    tensor_cls = getattr(lib, name)

    # Record the lazily-resolved name so star-imports see it afterwards.
    if name not in __all__:
        __all__.append(name)

    return tensor_cls
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding

__all__ = ['NdArrayEmbedding', 'AnyEmbedding']

from docarray.utils._internal.misc import is_tf_available, is_torch_available

torch_available = is_torch_available()
if torch_available:
    # Torch is installed: also expose the torch-backed embedding type.
    from docarray.typing.tensor.embedding.torch import TorchEmbedding  # noqa F401

    __all__.append('TorchEmbedding')

tf_available = is_tf_available()
if tf_available:
    # TensorFlow is installed: also expose the TF-backed embedding type.
    from docarray.typing.tensor.embedding.tensorflow import (  # noqa F401
        TensorFlowEmbedding,
    )

    __all__.append('TensorFlowEmbedding')
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/lvis_v0.5/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # Multi-scale training: short side is sampled from 640-800 px.
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    # LVIS is long-tailed: ClassBalancedDataset oversamples images that
    # contain rare categories (repeat-factor threshold 1e-3).
    dataset=dict(
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/lvis_v0.5_train.json',
            data_prefix=dict(img='train2017/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v0.5_val.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

# Evaluate both boxes and instance masks with the LVIS-specific metric.
val_evaluator = dict(
    type='LVISMetric',
    ann_file=data_root + 'annotations/lvis_v0.5_val.json',
    metric=['bbox', 'segm'],
    backend_args=backend_args)
test_evaluator = val_evaluator
# dataset settings dataset_type = 'LVISV05Dataset' data_root = 'data/lvis_v0.5/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='RandomChoiceResize', scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='Resize', scale=(1333, 800), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] train_dataloader = dict( batch_size=2, num_workers=2, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), batch_sampler=dict(type='AspectRatioBatchSampler'), dataset=dict( type='ClassBalancedDataset', oversample_thr=1e-3, dataset=dict( type=dataset_type, data_root=data_root, ann_file='annotations/lvis_v0.5_train.json', data_prefix=dict(img='train2017/'), filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=train_pipeline))) val_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, ann_file='annotations/lvis_v0.5_val.json', data_prefix=dict(img='val2017/'), test_mode=True, pipeline=test_pipeline)) test_dataloader = val_dataloader val_evaluator = dict( type='LVISMetric', ann_file=data_root + 'annotations/lvis_v0.5_val.json', metric=['bbox', 'segm']) test_evaluator = val_evaluator
import fastapi

from .middleware import auth_middleware
from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
from .config import Settings


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
    """FastAPI dependency resolving the authenticated user (any role)."""
    return verify_user(payload, admin_only=False)


def requires_admin_user(
    payload: dict = fastapi.Depends(auth_middleware),
) -> User:
    """FastAPI dependency resolving the authenticated user, requiring admin."""
    return verify_user(payload, admin_only=True)


def verify_user(payload: dict | None, admin_only: bool) -> User:
    """Validate a decoded token payload and build a :class:`User`.

    :param payload: decoded JWT claims, or None when no token was supplied
    :param admin_only: when True, require the ``role`` claim to be ``admin``
    :raises fastapi.HTTPException: 401 when auth is enabled and the payload is
        missing or has no subject; 403 when ``admin_only`` and the role is
        not ``admin``
    """
    if not payload:
        if Settings.ENABLE_AUTH:
            raise fastapi.HTTPException(
                status_code=401, detail="Authorization header is missing"
            )
        # This handles the case when authentication is disabled
        payload = {"sub": DEFAULT_USER_ID, "role": "admin"}

    user_id = payload.get("sub")

    if not user_id:
        raise fastapi.HTTPException(
            status_code=401, detail="User ID not found in token"
        )

    # Use .get() so a token without a "role" claim produces a clean 403
    # instead of an unhandled KeyError (HTTP 500).
    if admin_only and payload.get("role") != "admin":
        raise fastapi.HTTPException(status_code=403, detail="Admin access required")

    return User.from_payload(payload)
import fastapi

from .middleware import auth_middleware
from .models import User

# Pseudo-identity used for requests that arrive without a token payload
# (i.e. when authentication is disabled); it is granted the admin role.
_DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
    """FastAPI dependency: resolve the requesting user (any role)."""
    return verify_user(payload, admin_only=False)


def requires_admin_user(
    payload: dict = fastapi.Depends(auth_middleware),
) -> User:
    """FastAPI dependency: resolve the requesting user, requiring admin role."""
    return verify_user(payload, admin_only=True)


def verify_user(payload: dict | None, admin_only: bool) -> User:
    """Validate a decoded token payload and build the corresponding User.

    Args:
        payload: Decoded token claims from the auth middleware, or None when
            no Authorization header was provided.
        admin_only: When True, require the token's role claim to be "admin".

    Raises:
        fastapi.HTTPException: 401 when the payload has no "sub" claim;
            403 when ``admin_only`` is set and the role is not "admin".
    """
    if not payload:
        # This handles the case when authentication is disabled
        payload = {"sub": _DEFAULT_USER_ID, "role": "admin"}

    user_id = payload.get("sub")
    if not user_id:
        raise fastapi.HTTPException(
            status_code=401, detail="User ID not found in token"
        )

    # Use .get() so a token without a "role" claim produces a clean 403
    # instead of an unhandled KeyError (which would surface as a 500).
    if admin_only and payload.get("role") != "admin":
        raise fastapi.HTTPException(status_code=403, detail="Admin access required")

    return User.from_payload(payload)
# model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( type='RetinaNet', preprocess_cfg=preprocess_cfg, backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_input', num_outs=5), bbox_head=dict( type='RetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), # model training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1), sampler=dict( type='PseudoSampler'), # Focal loss should use PseudoSampler allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
# model settings
preprocess_cfg = dict(
    # ImageNet mean/std in RGB order.
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    pad_size_divisor=32)
model = dict(
    type='RetinaNet',
    preprocess_cfg=preprocess_cfg,
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Freeze the stem and the first stage; keep BN in eval mode.
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        # Make the sampler explicit: with FocalLoss all anchors contribute
        # to the loss, so no positive/negative sub-sampling is wanted.
        sampler=dict(
            type='PseudoSampler'),  # Focal loss should use PseudoSampler
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
from pathlib import Path

from jina import Executor
from jina.executors import BaseExecutor
from PIL import Image


def test_config():
    """Smoke test: the executor config must load without raising."""
    Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))


def _assert_image_chunks(test_dir, chunks):
    """Compare the first two image chunks against the on-disk reference images."""
    for idx, chunk in enumerate(chunks[:2]):
        with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
            blob = chunk.blob
            assert chunk.mime_type == 'image/*'
            # img.size is (width, height) while blob is (height, width, ch).
            # NOTE: the original `assert a, b == c` was an assert-with-message
            # that never compared anything; compare the tuple explicitly.
            assert (blob.shape[1], blob.shape[0]) == img.size
            if idx == 0:
                assert blob.shape == (660, 1024, 3)
            if idx == 1:
                assert blob.shape == (626, 1191, 3)


def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_img_text
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 3
        # Check images
        _assert_image_chunks(test_dir, chunks)
        # Check text
        assert chunks[2].text == expected_text
        assert chunks[2].mime_type == 'text/plain'


def test_io_text(doc_generator_text, expected_text):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_text
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 1
        # Check text
        assert chunks[0].text == expected_text
        assert chunks[0].mime_type == 'text/plain'


def test_io_img(test_dir, doc_generator_img):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_img
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 3
        # Check images
        _assert_image_chunks(test_dir, chunks)
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
from pathlib import Path

from jina import Executor
from jina.executors import BaseExecutor
from PIL import Image


def test_config():
    """Smoke test: the executor config must load without raising."""
    Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))


def _assert_image_chunks(test_dir, chunks):
    """Compare the first two image chunks against the on-disk reference images."""
    for idx, chunk in enumerate(chunks[:2]):
        with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
            blob = chunk.blob
            assert chunk.mime_type == 'image/*'
            # img.size is (width, height) while blob is (height, width, ch).
            # NOTE: the original `assert a, b == c` was an assert-with-message
            # that never compared anything; compare the tuple explicitly.
            assert (blob.shape[1], blob.shape[0]) == img.size
            if idx == 0:
                assert blob.shape == (660, 1024, 3)
            if idx == 1:
                assert blob.shape == (626, 1191, 3)


def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_img_text
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 3
        # Check images
        _assert_image_chunks(test_dir, chunks)
        # Check text
        assert chunks[2].text == expected_text
        assert chunks[2].mime_type == 'text/plain'


def test_io_text(doc_generator_text, expected_text):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_text
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 1
        # Check text
        assert chunks[0].text == expected_text
        assert chunks[0].mime_type == 'text/plain'


def test_io_img(test_dir, doc_generator_img):
    crafter = BaseExecutor.load_config('config.yml')
    doc_array = doc_generator_img
    for doc in doc_array:
        crafter.craft(doc)
        chunks = doc[0].chunks
        assert len(chunks) == 3
        # Check images
        _assert_image_chunks(test_dir, chunks)
__version__ = '0.13.10'

import os

from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field

# Install rich's pretty traceback handler unless the user opted out by
# setting the DA_NO_RICH_HANDLER environment variable (any value).
if 'DA_NO_RICH_HANDLER' not in os.environ:
    from rich.traceback import install

    install()
__version__ = '0.13.9'

import os

from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field

# Install rich's pretty traceback handler unless the user opted out by
# setting the DA_NO_RICH_HANDLER environment variable (any value).
if 'DA_NO_RICH_HANDLER' not in os.environ:
    from rich.traceback import install

    install()
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig


@MODELS.register_module()
class ChannelMapper(BaseModule):
    """Channel Mapper to reduce/increase channels of backbone features.

    This is used to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Default: None.
        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            normalization layer. Default: None.
        act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            activation layer in ConvModule. Default: dict(type='ReLU').
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
            False. Default: "auto".
        num_outs (int, optional): Number of output feature maps. There would
            be extra_convs when num_outs larger than the length of in_channels.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or
            dict], optional): Initialization config dict.
    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(
        self,
        in_channels: List[int],
        out_channels: int,
        kernel_size: int = 3,
        conv_cfg: OptConfigType = None,
        norm_cfg: OptConfigType = None,
        act_cfg: OptConfigType = dict(type='ReLU'),
        bias: Union[bool, str] = 'auto',
        num_outs: int = None,
        init_cfg: OptMultiConfig = dict(
            type='Xavier', layer='Conv2d', distribution='uniform')
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if num_outs is None:
            # Default: one output level per input scale.
            num_outs = len(in_channels)
        # One 1:1 mapping conv per input scale.
        self.convs = nn.ModuleList()
        for in_channel in in_channels:
            self.convs.append(
                ConvModule(
                    in_channel,
                    out_channels,
                    kernel_size,
                    padding=(kernel_size - 1) // 2,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    bias=bias))
        if num_outs > len(in_channels):
            # Extra stride-2 3x3 convs produce the additional output levels.
            self.extra_convs = nn.ModuleList()
            for i in range(len(in_channels), num_outs):
                if i == len(in_channels):
                    # The first extra conv consumes the last input feature.
                    in_channel = in_channels[-1]
                else:
                    in_channel = out_channels
                self.extra_convs.append(
                    ConvModule(
                        in_channel,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg,
                        bias=bias))

    def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
        if self.extra_convs:
            for i in range(len(self.extra_convs)):
                if i == 0:
                    # First extra level is built from the raw last input.
                    outs.append(self.extra_convs[0](inputs[-1]))
                else:
                    # Subsequent levels chain off the previous extra output.
                    outs.append(self.extra_convs[i](outs[-1]))
        return tuple(outs)
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig


@MODELS.register_module()
class ChannelMapper(BaseModule):
    """Channel Mapper to reduce/increase channels of backbone features.

    This is used to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Default: None.
        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            normalization layer. Default: None.
        act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            activation layer in ConvModule. Default: dict(type='ReLU').
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
            False. Default: "auto".
        num_outs (int, optional): Number of output feature maps. There would
            be extra_convs when num_outs larger than the length of in_channels.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or
            dict], optional): Initialization config dict.
    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(
        self,
        in_channels: List[int],
        out_channels: int,
        kernel_size: int = 3,
        conv_cfg: OptConfigType = None,
        norm_cfg: OptConfigType = None,
        act_cfg: OptConfigType = dict(type='ReLU'),
        bias: Union[bool, str] = 'auto',
        num_outs: int = None,
        init_cfg: OptMultiConfig = dict(
            type='Xavier', layer='Conv2d', distribution='uniform')
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if num_outs is None:
            # Default: one output level per input scale.
            num_outs = len(in_channels)
        # One 1:1 mapping conv per input scale. `bias` generalizes the
        # previous hard-coded ConvModule default ('auto') so configs can
        # control the conv bias explicitly; the default is backward
        # compatible.
        self.convs = nn.ModuleList()
        for in_channel in in_channels:
            self.convs.append(
                ConvModule(
                    in_channel,
                    out_channels,
                    kernel_size,
                    padding=(kernel_size - 1) // 2,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    bias=bias))
        if num_outs > len(in_channels):
            # Extra stride-2 3x3 convs produce the additional output levels.
            self.extra_convs = nn.ModuleList()
            for i in range(len(in_channels), num_outs):
                if i == len(in_channels):
                    # The first extra conv consumes the last input feature.
                    in_channel = in_channels[-1]
                else:
                    in_channel = out_channels
                self.extra_convs.append(
                    ConvModule(
                        in_channel,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg,
                        bias=bias))

    def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
        if self.extra_convs:
            for i in range(len(self.extra_convs)):
                if i == 0:
                    # First extra level is built from the raw last input.
                    outs.append(self.extra_convs[0](inputs[-1]))
                else:
                    # Subsequent levels chain off the previous extra output.
                    outs.append(self.extra_convs[i](outs[-1]))
        return tuple(outs)
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager

import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler

from mmengine.registry import OPTIM_WRAPPERS
from mmengine.utils import TORCH_VERSION, digit_version
from .optimizer_wrapper import OptimWrapper


@OPTIM_WRAPPERS.register_module()
class AmpOptimWrapper(OptimWrapper):
    """A subclass of :class:`OptimWrapper` that supports automatic mixed
    precision training based on torch.cuda.amp.

    ``AmpOptimWrapper`` provides a unified interface with ``OptimWrapper``, so
    ``AmpOptimWrapper`` can be used in the same way as ``OptimWrapper``.

    Warnings:
        ``AmpOptimWrapper`` requires PyTorch >= 1.6.

    Args:
        loss_scale (float or str or dict): The initial configuration of
            `torch.cuda.amp.GradScaler`. See more specific arguments
            introduction at `PyTorch AMP <https://pytorch.org/docs/stable/amp.html?highlight=gradscalertorch.cuda.amp.GradScaler>`_ # noqa: E501
            Defaults to ``dynamic``.

            - "dynamic": Initialize GradScale without any arguments.
            - float: Initialize GradScaler with ``init_scale``.
            - dict: Initialize GradScaler with more detail configuration.

        **kwargs: Keyword arguments passed to OptimWrapper.

    Note:
        If you use ``IterBasedRunner`` and enable gradient accumulation,
        the original `max_iters` should be multiplied by
        ``accumulative_counts``.
    """

    def __init__(self, loss_scale='dynamic', **kwargs):
        assert digit_version(TORCH_VERSION) >= digit_version('1.6.0'), (
            '`torch.cuda.amp` is only available when pytorch version >= 1.6')
        assert torch.cuda.is_available(), (
            '``AmpOptimizerWrapper`` is only available training on gpu')
        super().__init__(**kwargs)
        # None means dynamic scaling; a float pins the scale on every update.
        self._scale_update_param = None
        if loss_scale == 'dynamic':
            # If loss_scale is a string, it must be 'dynamic', then dynamic
            # loss scaling will be used.
            self.loss_scaler = GradScaler()
        elif isinstance(loss_scale, float):
            # Static loss scaling
            self._scale_update_param = loss_scale
            self.loss_scaler = GradScaler(init_scale=loss_scale)
        elif isinstance(loss_scale, dict):
            # More specific configuration.
            self.loss_scaler = GradScaler(**loss_scale)
        else:
            raise TypeError('loss_scale must be of type float, dict, or '
                            f'"dynamic", but got {loss_scale}')

    def backward(self, loss: torch.Tensor):
        """Perform gradient back propagation with :attr:`loss_scaler`.

        Args:
            loss (torch.Tensor): The loss of current iteration.
        """
        # Scale the loss before backprop so fp16 gradients do not underflow.
        self.loss_scaler.scale(loss).backward()
        # NOTE(review): appears to keep the parent's accumulation counter in
        # sync with each backward call — confirm against OptimWrapper.
        self._inner_count += 1

    def step(self):
        """Update parameters with :attr:`loss_scaler`."""
        if self.clip_grad_kwargs:
            # Gradients must be unscaled before clipping thresholds apply.
            self.loss_scaler.unscale_(self.optimizer)
            self._clip_grad()
        self.loss_scaler.step(self.optimizer)
        self.loss_scaler.update(self._scale_update_param)

    def state_dict(self) -> dict:
        """Get the state dictionary of :attr:`optimizer` and
        :attr:`loss_scaler`.

        Based on the state dictionary of the optimizer, the returned state
        dictionary will add a key named "loss_scaler".

        Returns:
            dict: The merged state dict of :attr:`loss_scaler` and
            :attr:`optimizer`.
        """
        # save state_dict of loss_scaler
        state_dict = self.optimizer.state_dict()
        state_dict['loss_scaler'] = self.loss_scaler.state_dict()
        return state_dict

    def load_state_dict(self, state_dict: dict):
        """Load and parse the state dictionary of :attr:`optimizer` and
        :attr:`loss_scaler`.

        If state_dict contains "loss_scaler.", the :attr:`loss_scaler` will
        load the corresponding keys. Otherwise, only the :attr:`optimizer`
        will load the state dictionary.

        Args:
            state_dict (dict): The state dict of :attr:`optimizer` and
                :attr:`loss_scaler`
        """
        # pop() so the remainder is a plain optimizer state dict.
        if 'loss_scaler' in state_dict:
            self.loss_scaler.load_state_dict(state_dict.pop('loss_scaler'))
        self.optimizer.load_state_dict(state_dict)

    @contextmanager
    def optim_context(self, model: nn.Module):
        """Enables the context for mixed precision training, and enables the
        context for disabling gradient synchronization during gradient
        accumulation context.

        Args:
            model (nn.Module): The training model.
        """
        # Layer the parent's context (gradient-sync handling) under autocast.
        with super().optim_context(model), torch.cuda.amp.autocast():
            yield
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager

import torch
from torch.cuda.amp import GradScaler

from mmengine.registry import OPTIM_WRAPPERS
from mmengine.utils import TORCH_VERSION, digit_version
from .optimizer_wrapper import OptimWrapper


@OPTIM_WRAPPERS.register_module()
class AmpOptimWrapper(OptimWrapper):
    """A subclass of :class:`OptimWrapper` that supports automatic mixed
    precision training based on torch.cuda.amp.

    ``AmpOptimWrapper`` provides a unified interface with ``OptimWrapper``, so
    ``AmpOptimWrapper`` can be used in the same way as ``OptimWrapper``.

    Warnings:
        ``AmpOptimWrapper`` requires PyTorch >= 1.6.

    Args:
        loss_scale (float or str or dict): The initial configuration of
            `torch.cuda.amp.GradScaler`. See more specific arguments
            introduction at `PyTorch AMP <https://pytorch.org/docs/stable/amp.html?highlight=gradscalertorch.cuda.amp.GradScaler>`_ # noqa: E501

            - "dynamic": Initialize GradScale without any arguments.
            - float: Initialize GradScaler with ``init_scale``.
            - dict: Initialize GradScaler with more detail configuration.

        **kwargs: Keyword arguments passed to OptimWrapper.
    """

    # Default is a static scale of 512 (a float, so the init_scale branch
    # below is taken); pass 'dynamic' to get adaptive loss scaling.
    def __init__(self, loss_scale=512., **kwargs):
        assert digit_version(TORCH_VERSION) >= digit_version('1.6.0'), (
            '`torch.cuda.amp` is only available when pytorch version >= 1.6')
        assert torch.cuda.is_available(), (
            '``AmpOptimizerWrapper`` is only available training on gpu')
        super().__init__(**kwargs)
        # None means dynamic scaling; a float pins the scale on every update.
        self._scale_update_param = None
        if loss_scale == 'dynamic':
            # If loss_scale is a string, it must be 'dynamic', then dynamic
            # loss scaling will be used.
            self.loss_scaler = GradScaler()
        elif isinstance(loss_scale, float):
            # Static loss scaling
            self._scale_update_param = loss_scale
            self.loss_scaler = GradScaler(init_scale=loss_scale)
        elif isinstance(loss_scale, dict):
            # More specific configuration.
            self.loss_scaler = GradScaler(**loss_scale)
        else:
            raise TypeError('loss_scale must be of type float, dict, or '
                            f'"dynamic", but got {loss_scale}')

    def backward(self, loss: torch.Tensor):
        """Perform gradient back propagation with :attr:`loss_scaler`.

        Args:
            loss (torch.Tensor): The loss of current iteration.
        """
        # Scale the loss before backprop so fp16 gradients do not underflow.
        self.loss_scaler.scale(loss).backward()

    def step(self):
        """Update parameters with :attr:`loss_scaler`."""
        if self.clip_grad_kwargs:
            # Gradients must be unscaled before clipping thresholds apply.
            self.loss_scaler.unscale_(self.optimizer)
            self._clip_grad()
        self.loss_scaler.step(self.optimizer)
        self.loss_scaler.update(self._scale_update_param)

    def state_dict(self) -> dict:
        """Get the state dictionary of :attr:`optimizer` and
        :attr:`loss_scaler`.

        Based on the state dictionary of the optimizer, the returned state
        dictionary will add a key named "loss_scaler".

        Returns:
            dict: The merged state dict of :attr:`loss_scaler` and
            :attr:`optimizer`.
        """
        # save state_dict of loss_scaler
        state_dict = self.optimizer.state_dict()
        state_dict['loss_scaler'] = self.loss_scaler.state_dict()
        return state_dict

    def load_state_dict(self, state_dict: dict):
        """Load and parse the state dictionary of :attr:`optimizer` and
        :attr:`loss_scaler`.

        If state_dict contains "loss_scaler.", the :attr:`loss_scaler` will
        load the corresponding keys. Otherwise, only the :attr:`optimizer`
        will load the state dictionary.

        Args:
            state_dict (dict): The state dict of :attr:`optimizer` and
                :attr:`loss_scaler`
        """
        # pop() so the remainder is a plain optimizer state dict.
        if 'loss_scaler' in state_dict:
            self.loss_scaler.load_state_dict(state_dict.pop('loss_scaler'))
        self.optimizer.load_state_dict(state_dict)

    @contextmanager
    def precision_context(self):
        """A wrapper of ``torch.cuda.amp.autocast``"""
        with torch.cuda.amp.autocast():
            yield
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

# Import only for type checkers; at runtime the name is resolved lazily
# through the module-level __getattr__ below.
if TYPE_CHECKING:
    from langchain_community.chat_message_histories import PostgresChatMessageHistory

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "PostgresChatMessageHistory": "langchain_community.chat_message_histories",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


# PEP 562 module-level __getattr__: delegates attribute access to the
# importer created above.
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "PostgresChatMessageHistory",
]
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

# Import only for type checkers; at runtime the name is resolved lazily
# through the module-level __getattr__ below.
if TYPE_CHECKING:
    from langchain_community.chat_message_histories import PostgresChatMessageHistory

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "PostgresChatMessageHistory": "langchain_community.chat_message_histories"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


# PEP 562 module-level __getattr__: delegates attribute access to the
# importer created above.
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "PostgresChatMessageHistory",
]
"""
Remote file reader.

A loader that fetches any remote page or file by URL and retrieves child pages
with certain constraints. The class also parses the contents of each page and
provides access to the parsed data.
"""

from typing import Any, Dict, List, Optional, Union

import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.remote import RemoteReader


class RemoteDepthReader(BaseReader):
    """Recursively load a URL and the pages it links to, up to ``depth``."""

    def __init__(
        self,
        *args: Any,
        file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
        depth: int = 1,
        domain_lock: bool = False,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        super().__init__(*args, **kwargs)
        self.file_extractor = file_extractor
        self.depth = depth
        self.domain_lock = domain_lock

    def load_data(self, url: str) -> List[Document]:
        """Parse whatever is at the URL, following links up to ``self.depth``.

        Args:
            url: Root URL to start crawling from.

        Returns:
            Documents loaded from every visited URL; per-URL failures are
            reported and skipped, not raised.
        """
        from tqdm.auto import tqdm

        remote_reader = RemoteReader(file_extractor=self.file_extractor)
        documents = []
        links = self.get_links(url)
        urls = {-1: [url]}  # -1 is the starting point
        # Membership-only bookkeeping: a set gives O(1) "already visited"
        # checks instead of the original O(n) list scan.
        links_visited = set()
        for i in range(self.depth + 1):
            urls[i] = []
            new_links = []
            print(f"Reading links at depth {i}...")
            for link in tqdm(links):
                # Honour the domain lock: only follow links under the root URL.
                if (self.domain_lock and link.find(url) > -1) or (not self.domain_lock):
                    print("Loading link: " + link)
                    if link in links_visited:
                        continue
                    if link:
                        urls[i].append(link)
                        new_links.extend(self.get_links(link))
                    links_visited.add(link)
                else:
                    print("Link ignored: " + link)
            # Deduplicate before descending to the next depth level.
            links = list(set(new_links))
        print(f"Found {len(urls)} links at depth {self.depth}.")
        for depth_i in urls:
            for url in urls[depth_i]:
                try:
                    documents.extend(remote_reader.load_data(url))
                except Exception as e:
                    # Best effort: report and keep going on a bad URL.
                    print(f"Error reading {url} at depth {depth_i}: {e}")
                    continue
        return documents

    @staticmethod
    def is_url(href) -> bool:
        """Check if a link is a URL."""
        return href.startswith("http")

    def get_links(self, url) -> List[str]:
        """Get all links from a page, absolutized and stripped of queries."""
        # NOTE: the original split `from urllib.parse` / `import urljoin, ...`
        # across two statements, which is a SyntaxError; joined into one.
        from urllib.parse import urljoin, urlparse, urlunparse

        from bs4 import BeautifulSoup

        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        links = soup.find_all("a")
        result = []
        for link in links:
            href = link if isinstance(link, str) else link.get("href")
            if href is None:
                continue
            # Make relative links absolute against the page URL.
            if not self.is_url(href):
                href = urljoin(url, href)
            url_parsed = urlparse(href)
            # Drop params/query/fragment so duplicates collapse to one URL.
            url_without_query_string = urlunparse(
                (url_parsed.scheme, url_parsed.netloc, url_parsed.path, "", "", "")
            )
            if (
                url_without_query_string not in result
                and url_without_query_string
                and url_without_query_string.startswith("http")
            ):
                result.append(url_without_query_string)
        return result
"""
Remote file reader.

A loader that fetches any remote page or file by URL and retrieves child pages
with certain constraints. The class also parses the contents of each page and
provides access to the parsed data.
"""

from typing import Any, Dict, List, Optional, Union

import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.remote import RemoteReader


class RemoteDepthReader(BaseReader):
    """Recursively load a URL and the pages it links to, up to ``depth``."""

    def __init__(
        self,
        *args: Any,
        file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
        depth: int = 1,
        domain_lock: bool = False,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        super().__init__(*args, **kwargs)
        self.file_extractor = file_extractor
        self.depth = depth
        self.domain_lock = domain_lock

    def load_data(self, url: str) -> List[Document]:
        """Parse whatever is at the URL, following links up to ``self.depth``.

        Args:
            url: Root URL to start crawling from.

        Returns:
            Documents loaded from every visited URL; per-URL failures are
            reported and skipped, not raised.
        """
        from tqdm.auto import tqdm

        remote_reader = RemoteReader(file_extractor=self.file_extractor)
        documents = []
        links = self.get_links(url)
        urls = {-1: [url]}  # -1 is the starting point
        # Membership-only bookkeeping: a set gives O(1) "already visited"
        # checks instead of the original O(n) list scan.
        links_visited = set()
        for i in range(self.depth + 1):
            urls[i] = []
            new_links = []
            print(f"Reading links at depth {i}...")
            for link in tqdm(links):
                # Honour the domain lock: only follow links under the root URL.
                if (self.domain_lock and link.find(url) > -1) or (not self.domain_lock):
                    print("Loading link: " + link)
                    if link in links_visited:
                        continue
                    if link:
                        urls[i].append(link)
                        new_links.extend(self.get_links(link))
                    links_visited.add(link)
                else:
                    print("Link ignored: " + link)
            # Deduplicate before descending to the next depth level.
            links = list(set(new_links))
        print(f"Found {len(urls)} links at depth {self.depth}.")
        for depth_i in urls:
            for url in urls[depth_i]:
                try:
                    documents.extend(remote_reader.load_data(url))
                except Exception as e:
                    # Best effort: report and keep going on a bad URL.
                    print(f"Error reading {url} at depth {depth_i}: {e}")
                    continue
        return documents

    @staticmethod
    def is_url(href) -> bool:
        """Check if a link is a URL."""
        return href.startswith("http")

    def get_links(self, url) -> List[str]:
        """Get all links from a page, absolutized and stripped of queries."""
        # NOTE: the original split `from urllib.parse` / `import urljoin, ...`
        # across two statements, which is a SyntaxError; joined into one.
        from urllib.parse import urljoin, urlparse, urlunparse

        from bs4 import BeautifulSoup

        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        links = soup.find_all("a")
        result = []
        for link in links:
            href = link if isinstance(link, str) else link.get("href")
            if href is None:
                continue
            # Make relative links absolute against the page URL.
            if not self.is_url(href):
                href = urljoin(url, href)
            url_parsed = urlparse(href)
            # Drop params/query/fragment so duplicates collapse to one URL.
            url_without_query_string = urlunparse(
                (url_parsed.scheme, url_parsed.netloc, url_parsed.path, "", "", "")
            )
            if (
                url_without_query_string not in result
                and url_without_query_string
                and url_without_query_string.startswith("http")
            ):
                result.append(url_without_query_string)
        return result
import importlib

import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient

from ...utils import needs_py39


@pytest.fixture(
    name="client",
    params=[
        "tutorial009",
        pytest.param("tutorial009_py39", marks=needs_py39),
    ],
)
def get_client(request: pytest.FixtureRequest):
    # Parametrized over both tutorial variants so every test below runs
    # against each; the py39 variant is gated by the needs_py39 marker.
    mod = importlib.import_module(f"docs_src.body_nested_models.{request.param}")
    client = TestClient(mod.app)
    return client


def test_post_body(client: TestClient):
    # Valid body: string keys that parse as ints, float values — echoed back.
    data = {"2": 2.2, "3": 3.3}
    response = client.post("/index-weights/", json=data)
    assert response.status_code == 200, response.text
    assert response.json() == data


def test_post_invalid_body(client: TestClient):
    # "foo" cannot be coerced to an int key, so validation must fail with 422.
    data = {"foo": 2.2, "3": 3.3}
    response = client.post("/index-weights/", json=data)
    assert response.status_code == 422, response.text
    # The IsDict union accepts either the Pydantic v2 or v1 error shape.
    assert response.json() == IsDict(
        {
            "detail": [
                {
                    "type": "int_parsing",
                    "loc": ["body", "foo", "[key]"],
                    "msg": "Input should be a valid integer, unable to parse string as an integer",
                    "input": "foo",
                }
            ]
        }
    ) | IsDict(
        # TODO: remove when deprecating Pydantic v1
        {
            "detail": [
                {
                    "loc": ["body", "__key__"],
                    "msg": "value is not a valid integer",
                    "type": "type_error.integer",
                }
            ]
        }
    )


def test_openapi_schema(client: TestClient):
    # Snapshot test: the generated OpenAPI document must match exactly.
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/index-weights/": {
                "post": {
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {"application/json": {"schema": {}}},
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                    "summary": "Create Index Weights",
                    "operationId": "create_index_weights_index_weights__post",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "title": "Weights",
                                    "type": "object",
                                    "additionalProperties": {"type": "number"},
                                }
                            }
                        },
                        "required": True,
                    },
                }
            }
        },
        "components": {
            "schemas": {
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
            }
        },
    }
# Tests for the FastAPI docs example `body_nested_models/tutorial009`
# (request body typed as Dict[int, float]); single-module variant.
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient


@pytest.fixture(name="client")
def get_client():
    # Import inside the fixture so the tutorial app is only loaded on demand.
    from docs_src.body_nested_models.tutorial009 import app

    client = TestClient(app)
    return client


def test_post_body(client: TestClient):
    # JSON keys are strings; keys that parse as ints are accepted and the
    # body is echoed back unchanged.
    data = {"2": 2.2, "3": 3.3}
    response = client.post("/index-weights/", json=data)
    assert response.status_code == 200, response.text
    assert response.json() == data


def test_post_invalid_body(client: TestClient):
    # A non-numeric key must produce a 422; the two IsDict alternatives
    # cover the Pydantic v2 and v1 error payload shapes respectively.
    data = {"foo": 2.2, "3": 3.3}
    response = client.post("/index-weights/", json=data)
    assert response.status_code == 422, response.text
    assert response.json() == IsDict(
        {
            "detail": [
                {
                    "type": "int_parsing",
                    "loc": ["body", "foo", "[key]"],
                    "msg": "Input should be a valid integer, unable to parse string as an integer",
                    "input": "foo",
                }
            ]
        }
    ) | IsDict(
        # TODO: remove when deprecating Pydantic v1
        {
            "detail": [
                {
                    "loc": ["body", "__key__"],
                    "msg": "value is not a valid integer",
                    "type": "type_error.integer",
                }
            ]
        }
    )


def test_openapi_schema(client: TestClient):
    # Snapshot test: the generated OpenAPI document must match exactly.
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/index-weights/": {
                "post": {
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {"application/json": {"schema": {}}},
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                    "summary": "Create Index Weights",
                    "operationId": "create_index_weights_index_weights__post",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "title": "Weights",
                                    "type": "object",
                                    "additionalProperties": {"type": "number"},
                                }
                            }
                        },
                        "required": True,
                    },
                }
            }
        },
        "components": {
            "schemas": {
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
            }
        },
    }
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os
import platform
import warnings

import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope


def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Reads ``mp_start_method``, ``opencv_num_threads`` and
    ``data.workers_per_gpu`` / ``data.train_dataloader.workers_per_gpu``
    from ``cfg`` when present; mutates process-wide state (mp start method,
    OpenCV thread count, OMP/MKL environment variables).
    """
    # set multi-process start method as `fork` to speed up the training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{mp_start_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    # Use the larger of the top-level and train-dataloader worker counts.
    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
    if 'train_dataloader' in cfg.data:
        workers_per_gpu = \
            max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
                workers_per_gpu)

    # Only set a default when the user has not chosen one explicitly.
    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)


def register_all_modules(init_default_scope: bool = True) -> None:
    """Register all modules in mmdet into the registries.

    Args:
        init_default_scope (bool): Whether initialize the mmdet default scope.
            When `init_default_scope=True`, the global default scope will be
            set to `mmdet`, and all registries will build modules from mmdet's
            registry node. To understand more about the registry, please refer
            to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the subpackages triggers their registry decorators.
    import mmdet.core  # noqa: F401,F403
    import mmdet.datasets  # noqa: F401,F403
    import mmdet.metrics  # noqa: F401,F403
    import mmdet.models  # noqa: F401,F403
    if init_default_scope:
        # First call in this process: simply create the mmdet scope.
        never_created = DefaultScope.get_current_instance() is None \
            or not DefaultScope.check_instance_created('mmdet')
        if never_created:
            DefaultScope.get_instance('mmdet', scope_name='mmdet')
            return
        current_scope = DefaultScope.get_current_instance()
        if current_scope.scope_name != 'mmdet':
            warnings.warn('The current default scope '
                          f'"{current_scope.scope_name}" is not "mmdet", '
                          '`register_all_modules` will force the current'
                          'default scope to be "mmdet". If this is not '
                          'expected, please set `init_default_scope=False`.')
            # avoid name conflict
            new_instance_name = f'mmdet-{datetime.datetime.now()}'
            DefaultScope.get_instance(new_instance_name, scope_name='mmdet')
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings

import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope


def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Reads ``mp_start_method``, ``opencv_num_threads`` and
    ``data.workers_per_gpu`` / ``data.train_dataloader.workers_per_gpu``
    from ``cfg`` when present; mutates process-wide state (mp start method,
    OpenCV thread count, OMP/MKL environment variables).
    """
    # set multi-process start method as `fork` to speed up the training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{mp_start_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    # Use the larger of the top-level and train-dataloader worker counts.
    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
    if 'train_dataloader' in cfg.data:
        workers_per_gpu = \
            max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
                workers_per_gpu)

    # Only set a default when the user has not chosen one explicitly.
    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable '
            f'for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)


def register_all_modules(init_default_scope: bool = True) -> None:
    """Register all modules in mmdet into the registries.

    Args:
        init_default_scope (bool): Whether initialize the mmdet default scope.
            When `init_default_scope=True`, the global default scope will be
            set to `mmdet`, and all registries will build modules from mmdet's
            registry node. To understand more about the registry, please refer
            to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the subpackages triggers their registry decorators.
    import mmdet.core  # noqa: F401,F403
    import mmdet.datasets  # noqa: F401,F403
    import mmdet.metrics  # noqa: F401,F403
    import mmdet.models  # noqa: F401,F403
    if init_default_scope:
        # NOTE(review): unconditionally (re)claims the 'mmdet' scope; unlike
        # newer variants there is no guard for an already-existing scope.
        DefaultScope.get_instance('mmdet', scope_name='mmdet')
import pytest # type: ignore[import-not-found] @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest  # type: ignore[import-not-found]


# Marked `compile` so CI can select this test to exercise importing /
# compiling the integration-test package without running real tests.
@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
# CPU instantiations of the shared RNNT beam-search test suite, one class
# per floating-point dtype; all test logic lives in RNNTBeamSearchTestImpl.
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import (
    RNNTBeamSearchTestImpl,
)


class RNNTBeamSearchFloat32CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase):
    # Run the shared suite in float32 on CPU.
    dtype = torch.float32
    device = torch.device("cpu")


class RNNTBeamSearchFloat64CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase):
    # Run the shared suite in float64 on CPU.
    dtype = torch.float64
    device = torch.device("cpu")
# CPU instantiations of the shared RNNT beam-search test suite, one class
# per floating-point dtype; all test logic lives in RNNTBeamSearchTestImpl.
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import RNNTBeamSearchTestImpl


class RNNTBeamSearchFloat32CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase):
    # Run the shared suite in float32 on CPU.
    dtype = torch.float32
    device = torch.device("cpu")


class RNNTBeamSearchFloat64CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase):
    # Run the shared suite in float64 on CPU.
    dtype = torch.float64
    device = torch.device("cpu")
from __future__ import annotations import csv import gzip import os from . import InputExample class STSDataReader: """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx) Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1 """ def __init__( self, dataset_folder, s1_col_idx=0, s2_col_idx=1, score_col_idx=2, delimiter="\t", quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5, ): self.dataset_folder = dataset_folder self.score_col_idx = score_col_idx self.s1_col_idx = s1_col_idx self.s2_col_idx = s2_col_idx self.delimiter = delimiter self.quoting = quoting self.normalize_scores = normalize_scores self.min_score = min_score self.max_score = max_score def get_examples(self, filename, max_examples=0): """filename specified which data split to use (train.csv, dev.csv, test.csv).""" filepath = os.path.join(self.dataset_folder, filename) with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open( filepath, encoding="utf-8" ) as fIn: data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting) examples = [] for id, row in enumerate(data): score = float(row[self.score_col_idx]) if self.normalize_scores: # Normalize to a 0...1 value score = (score - self.min_score) / (self.max_score - self.min_score) s1 = row[self.s1_col_idx] s2 = row[self.s2_col_idx] examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score)) if max_examples > 0 and len(examples) >= max_examples: break return examples class STSBenchmarkDataReader(STSDataReader): """Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4. 
Scores are normalized from 0...5 to 0...1 """ def __init__( self, dataset_folder, s1_col_idx=5, s2_col_idx=6, score_col_idx=4, delimiter="\t", quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5, ): super().__init__( dataset_folder=dataset_folder, s1_col_idx=s1_col_idx, s2_col_idx=s2_col_idx, score_col_idx=score_col_idx, delimiter=delimiter, quoting=quoting, normalize_scores=normalize_scores, min_score=min_score, max_score=max_score, )
import csv
import gzip
import os

from . import InputExample


class STSDataReader:
    """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)

    Default values expects a tab separated file with the first & second column the sentence pair and third column
    the score (0...1). Default config normalizes scores from 0...5 to 0...1
    """

    def __init__(
        self,
        dataset_folder,
        s1_col_idx=0,
        s2_col_idx=1,
        score_col_idx=2,
        delimiter="\t",
        quoting=csv.QUOTE_NONE,
        normalize_scores=True,
        min_score=0,
        max_score=5,
    ):
        # Column layout and CSV parsing options; only consulted in
        # get_examples().
        self.dataset_folder = dataset_folder
        self.score_col_idx = score_col_idx
        self.s1_col_idx = s1_col_idx
        self.s2_col_idx = s2_col_idx
        self.delimiter = delimiter
        self.quoting = quoting
        self.normalize_scores = normalize_scores
        self.min_score = min_score
        self.max_score = max_score

    def get_examples(self, filename, max_examples=0):
        """filename specified which data split to use (train.csv, dev.csv, test.csv)."""
        filepath = os.path.join(self.dataset_folder, filename)
        # ``.gz`` files are transparently decompressed; both branches decode
        # as UTF-8 ("utf8" is an alias of "utf-8").
        with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
            filepath, encoding="utf-8"
        ) as fIn:
            data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
            examples = []
            # NOTE(review): the loop variable `id` shadows the builtin id().
            for id, row in enumerate(data):
                score = float(row[self.score_col_idx])
                if self.normalize_scores:  # Normalize to a 0...1 value
                    score = (score - self.min_score) / (self.max_score - self.min_score)

                s1 = row[self.s1_col_idx]
                s2 = row[self.s2_col_idx]
                examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))

                if max_examples > 0 and len(examples) >= max_examples:
                    break

            return examples


class STSBenchmarkDataReader(STSDataReader):
    """Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in
    column 4. Scores are normalized from 0...5 to 0...1
    """

    def __init__(
        self,
        dataset_folder,
        s1_col_idx=5,
        s2_col_idx=6,
        score_col_idx=4,
        delimiter="\t",
        quoting=csv.QUOTE_NONE,
        normalize_scores=True,
        min_score=0,
        max_score=5,
    ):
        # Same reader, re-defaulted to the STSb column layout.
        super().__init__(
            dataset_folder=dataset_folder,
            s1_col_idx=s1_col_idx,
            s2_col_idx=s2_col_idx,
            score_col_idx=score_col_idx,
            delimiter=delimiter,
            quoting=quoting,
            normalize_scores=normalize_scores,
            min_score=min_score,
            max_score=max_score,
        )
from typing import Union

import numpy as np
import PIL.Image
import torch

from torchvision import datapoints
from torchvision.transforms import functional as _F


@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
    """Wrap a tensor, PIL image, or HWC numpy array as a ``datapoints.Image``.

    Tensors are wrapped as-is; PIL images go through ``pil_to_tensor``;
    numpy arrays are converted and transposed from HWC to CHW layout.
    """
    if isinstance(inpt, torch.Tensor):
        data = inpt
    elif isinstance(inpt, PIL.Image.Image):
        data = pil_to_tensor(inpt)
    elif isinstance(inpt, np.ndarray):
        # HWC -> CHW; contiguous() gives downstream ops a packed tensor.
        data = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
    else:
        raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
    return datapoints.Image(data)


to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
from typing import Union

import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F


@torch.jit.unused
def to_image_tensor(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
    """Wrap a tensor, PIL image, or HWC numpy array as a ``datapoints.Image``.

    Tensors are wrapped as-is; PIL images go through ``pil_to_tensor``;
    numpy arrays are converted and transposed from HWC to CHW layout.
    """
    if isinstance(inpt, np.ndarray):
        # HWC -> CHW; contiguous() gives downstream ops a packed tensor.
        output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
    elif isinstance(inpt, PIL.Image.Image):
        output = pil_to_tensor(inpt)
    elif isinstance(inpt, torch.Tensor):
        output = inpt
    else:
        raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
    return datapoints.Image(output)


to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor

# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
# dataset settings
dataset_type = 'CityscapesDataset'
# TODO remove it after cityscape metric
# data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/'
data_root = 'data/cityscapes/'

# Training pipeline: multi-scale resize + random horizontal flip.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
# Test pipeline: single fixed scale, no flipping or annotations.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# Training set is repeated 8x per epoch (RepeatDataset) since Cityscapes
# train is small; empty-GT and tiny images are filtered out.
train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline)))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline))
# Test uses the val split (the official test split has no public GT).
test_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline))
# COCO-style bbox mAP on the val annotations.
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root +
    'annotations/instancesonly_filtered_gtFine_val.json',
    metric='bbox')
test_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root +
    'annotations/instancesonly_filtered_gtFine_val.json',
    metric='bbox')
# dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(2048, 1024), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=1, workers_per_gpu=2, train=dict( type='RepeatDataset', times=8, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_train.json', img_prefix=data_root + 'leftImg8bit/train/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', img_prefix=data_root + 'leftImg8bit/val/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_test.json', img_prefix=data_root + 'leftImg8bit/test/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox')
# Unit tests for BedrockRerank; the Bedrock `rerank` API call is mocked so
# no AWS credentials or network access are needed.
from unittest import TestCase, mock

import boto3
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.postprocessor.bedrock_rerank import BedrockRerank


class TestBedrockRerank(TestCase):
    def test_class(self):
        # BedrockRerank must be a BaseNodePostprocessor subclass.
        names_of_base_classes = [b.__name__ for b in BedrockRerank.__mro__]
        self.assertIn(BaseNodePostprocessor.__name__, names_of_base_classes)

    def test_bedrock_rerank(self):
        # Mocked Bedrock response: indices refer to positions in the
        # submitted node list, with descending relevance scores.
        exp_rerank_response = {
            "results": [
                {
                    "index": 2,
                    "relevanceScore": 0.9,
                },
                {
                    "index": 3,
                    "relevanceScore": 0.8,
                },
            ]
        }

        input_nodes = [
            NodeWithScore(node=TextNode(id_="1", text="first 1")),
            NodeWithScore(node=TextNode(id_="2", text="first 2")),
            NodeWithScore(node=TextNode(id_="3", text="last 1")),
            NodeWithScore(node=TextNode(id_="4", text="last 2")),
        ]
        # Nodes 2 and 3 (0-based) should come back, re-scored by the service.
        expected_nodes = [
            NodeWithScore(node=TextNode(id_="3", text="last 1"), score=0.9),
            NodeWithScore(node=TextNode(id_="4", text="last 2"), score=0.8),
        ]
        bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
        reranker = BedrockRerank(client=bedrock_client, top_n=2)

        with mock.patch.object(
            bedrock_client, "rerank", return_value=exp_rerank_response
        ):
            query_bundle = QueryBundle(query_str="last")
            actual_nodes = reranker.postprocess_nodes(
                input_nodes, query_bundle=query_bundle
            )

        # Compare content and (approximately) scores of the reranked nodes.
        self.assertEqual(len(actual_nodes), len(expected_nodes))
        for actual_node_with_score, expected_node_with_score in zip(
            actual_nodes, expected_nodes
        ):
            self.assertEqual(
                actual_node_with_score.node.get_content(),
                expected_node_with_score.node.get_content(),
            )
            self.assertAlmostEqual(
                actual_node_with_score.score, expected_node_with_score.score
            )

    def test_bedrock_rerank_consistent_top_n(self):
        # Regression test: asking Bedrock for results must not mutate the
        # reranker's configured top_n, even when fewer nodes are submitted.
        input_nodes = [NodeWithScore(node=TextNode(id_="4", text="last 1"))]
        bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
        reranker = BedrockRerank(client=bedrock_client, top_n=4)
        self.assertEqual(reranker.top_n, 4)

        with mock.patch.object(bedrock_client, "rerank") as patched_rerank:
            reranker.postprocess_nodes(input_nodes, query_str="last")
            self.assertTrue(patched_rerank.called)
            # The request should clamp numberOfResults to the node count...
            num_results = patched_rerank.call_args.kwargs["rerankingConfiguration"][
                "bedrockRerankingConfiguration"
            ]["numberOfResults"]
            self.assertEqual(num_results, len(input_nodes))
        # ...while top_n itself stays at its configured value.
        self.assertEqual(reranker.top_n, 4)
# Unit tests for BedrockRerank; the Bedrock `rerank` API call is mocked so
# no AWS credentials or network access are needed.
from unittest import TestCase, mock

import boto3
from llama_index.core.postprocessor.types import (
    BaseNodePostprocessor,
    NodeWithScore,
    QueryBundle,
)
from llama_index.core.schema import TextNode
from llama_index.postprocessor.bedrock_rerank import BedrockRerank


class TestBedrockRerank(TestCase):
    def test_class(self):
        # BedrockRerank must be a BaseNodePostprocessor subclass.
        names_of_base_classes = [b.__name__ for b in BedrockRerank.__mro__]
        self.assertIn(BaseNodePostprocessor.__name__, names_of_base_classes)

    def test_bedrock_rerank(self):
        # Mocked Bedrock response: indices refer to positions in the
        # submitted node list, with descending relevance scores.
        exp_rerank_response = {
            "results": [
                {
                    "index": 2,
                    "relevanceScore": 0.9,
                },
                {
                    "index": 3,
                    "relevanceScore": 0.8,
                },
            ]
        }

        input_nodes = [
            NodeWithScore(node=TextNode(id_="1", text="first 1")),
            NodeWithScore(node=TextNode(id_="2", text="first 2")),
            NodeWithScore(node=TextNode(id_="3", text="last 1")),
            NodeWithScore(node=TextNode(id_="4", text="last 2")),
        ]
        # Nodes 2 and 3 (0-based) should come back, re-scored by the service.
        expected_nodes = [
            NodeWithScore(node=TextNode(id_="3", text="last 1"), score=0.9),
            NodeWithScore(node=TextNode(id_="4", text="last 2"), score=0.8),
        ]
        bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
        reranker = BedrockRerank(client=bedrock_client, num_results=2)

        with mock.patch.object(
            bedrock_client, "rerank", return_value=exp_rerank_response
        ):
            query_bundle = QueryBundle(query_str="last")
            actual_nodes = reranker.postprocess_nodes(
                input_nodes, query_bundle=query_bundle
            )

        # Compare content and (approximately) scores of the reranked nodes.
        self.assertEqual(len(actual_nodes), len(expected_nodes))
        for actual_node_with_score, expected_node_with_score in zip(
            actual_nodes, expected_nodes
        ):
            self.assertEqual(
                actual_node_with_score.node.get_content(),
                expected_node_with_score.node.get_content(),
            )
            self.assertAlmostEqual(
                actual_node_with_score.score, expected_node_with_score.score
            )
# Copyright (c) Meta Platforms, Inc. and affiliates # Owner(s): ["oncall: distributed"] import torch from torch.distributed.pipelining import pipe_split, pipeline from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_utils import run_tests, TestCase # Building block for model class Block(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( in_channels=16, out_channels=16, kernel_size=3, padding=1 ) self.lin0 = torch.nn.Linear(256, 256) self.relu = torch.nn.ReLU() self.lin1 = torch.nn.Linear(256, 256) def forward(self, x: torch.Tensor, constant=None) -> torch.Tensor: x = self.conv(x) x = self.lin0(x) pipe_split() x.add(constant) x = self.lin1(x) return self.relu(x) # Full model class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.block0 = Block() self.block1 = Block() def forward(self, x: torch.Tensor, constant=None) -> torch.Tensor: x = self.block0(x, constant=constant) pipe_split() x = self.block1(x, constant=constant) return x class UnflattenTests(TestCase): def test_unflatten(self, device): x = torch.randn(1, 16, 256, 256, device=device) constant = torch.ones(1, 16, 256, 256, device=device) mod = M().to(device) pipe = pipeline( mod, (x,), {"constant": constant}, ) assert pipe.num_stages == 4 orig_state_dict = mod.state_dict() # Check qualnames for stage_idx in range(pipe.num_stages): stage_mod = pipe.get_stage_module(stage_idx) for param_name, _ in stage_mod.named_parameters(): assert param_name in orig_state_dict, ( f"{param_name} not in original state dict" ) print("Param qualname test passed") # Check equivalence ref = mod(x, constant) out = pipe(x, constant)[0] torch.testing.assert_close(out, ref) print(f"Equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}") devices = ["cpu", "cuda", "hpu", "xpu"] instantiate_device_type_tests(UnflattenTests, globals(), only_for=devices) if __name__ == "__main__": run_tests()
# Copyright (c) Meta Platforms, Inc. and affiliates # Owner(s): ["oncall: distributed"] import torch from torch.distributed.pipelining import pipe_split, pipeline from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_utils import run_tests, TestCase # Building block for model class Block(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( in_channels=16, out_channels=16, kernel_size=3, padding=1 ) self.lin0 = torch.nn.Linear(256, 256) self.relu = torch.nn.ReLU() self.lin1 = torch.nn.Linear(256, 256) def forward(self, x: torch.Tensor, constant=None) -> torch.Tensor: x = self.conv(x) x = self.lin0(x) pipe_split() x.add(constant) x = self.lin1(x) return self.relu(x) # Full model class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.block0 = Block() self.block1 = Block() def forward(self, x: torch.Tensor, constant=None) -> torch.Tensor: x = self.block0(x, constant=constant) pipe_split() x = self.block1(x, constant=constant) return x class UnflattenTests(TestCase): def test_unflatten(self, device): x = torch.randn(1, 16, 256, 256, device=device) constant = torch.ones(1, 16, 256, 256, device=device) mod = M().to(device) pipe = pipeline( mod, (x,), {"constant": constant}, ) assert pipe.num_stages == 4 orig_state_dict = mod.state_dict() # Check qualnames for stage_idx in range(pipe.num_stages): stage_mod = pipe.get_stage_module(stage_idx) for param_name, _ in stage_mod.named_parameters(): assert ( param_name in orig_state_dict ), f"{param_name} not in original state dict" print("Param qualname test passed") # Check equivalence ref = mod(x, constant) out = pipe(x, constant)[0] torch.testing.assert_close(out, ref) print(f"Equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}") devices = ["cpu", "cuda", "hpu", "xpu"] instantiate_device_type_tests(UnflattenTests, globals(), only_for=devices) if __name__ == "__main__": run_tests()
# Tests for TransformerTorchEncoder: tokenization, embedding computation,
# CPU/GPU encoding, semantic sanity of embeddings, and traversal paths.
from pathlib import Path
from typing import List

import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor

from ..integration.test_integration import filter_none
from ...transform_encoder import TransformerTorchEncoder


def test_config():
    # The shipped config.yml must load and point at the expected model.
    ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert (
        ex.pretrained_model_name_or_path
        == 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens'
    )


def test_compute_tokens():
    enc = TransformerTorchEncoder()
    tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])
    # Both sequences are padded to the longer one (7 tokens incl. specials).
    assert tokens["input_ids"].shape == (2, 7)
    assert tokens["attention_mask"].shape == (2, 7)


@pytest.mark.parametrize('hidden_seqlen', [4, 8])
def test_compute_embeddings(hidden_seqlen):
    # Pooling over zero hidden states must still yield one vector per doc.
    embedding_size = 10
    enc = TransformerTorchEncoder()
    tokens = enc._generate_input_tokens(["hello world"])
    hidden_states = tuple(
        torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7)
    )

    embeddings = enc._compute_embedding(
        hidden_states=hidden_states, input_tokens=tokens
    )

    assert embeddings.shape == (1, embedding_size)


def test_encoding_cpu():
    enc = TransformerTorchEncoder(device="cpu")
    input_data = DocumentArray([Document(text="hello world")])

    enc.encode(docs=input_data, parameters={})

    # DistilBERT hidden size.
    assert input_data[0].embedding.shape == (768,)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
    enc = TransformerTorchEncoder(device="cuda")
    input_data = DocumentArray([Document(text="hello world")])

    enc.encode(docs=input_data, parameters={})

    assert input_data[0].embedding.shape == (768,)


def test_encodes_semantic_meaning():
    # Semantically related sentences (C, D) must be closer in embedding
    # space than unrelated pairs.
    sentences = dict()
    sentences["A"] = "Hello, my name is Michael."
    sentences["B"] = "Today we are going to Disney World."
    sentences["C"] = "There are animals on the road"
    sentences["D"] = "A dog is running down the road"

    encoder = TransformerTorchEncoder()

    embeddings = {}
    for id_, sentence in sentences.items():
        docs = DocumentArray([Document(text=sentence)])
        encoder.encode(docs, parameters={})
        embeddings[id_] = docs[0].embedding

    def dist(a, b):
        a_embedding = embeddings[a]
        b_embedding = embeddings[b]
        return np.linalg.norm(a_embedding - b_embedding)

    small_distance = dist("C", "D")
    assert small_distance < dist("C", "B")
    assert small_distance < dist("C", "A")
    assert small_distance < dist("B", "A")


@pytest.mark.parametrize(
    ["docs", "docs_per_path", "traversal_path"],
    [
        (pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
        (
            pytest.lazy_fixture("docs_with_chunk_text"),
            [["r", 0], ["c", 10], ["cc", 0]],
            "c",
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_chunk_text"),
            [["r", 0], ["c", 0], ["cc", 10]],
            "cc",
        ),
    ],
)
def test_traversal_path(
    docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
    # Only the requested traversal level should receive embeddings; the
    # expected (path, count) pairs encode that per parametrization.
    def validate_traversal(expected_docs_per_path: List[List[str]]):
        def validate(res):
            for path, count in expected_docs_per_path:
                embeddings = filter_none(
                    DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
                )
                for emb in embeddings:
                    if emb is None:
                        return False
                assert len(embeddings) == count

        return validate

    encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
    encoder.encode(docs, {"traversal_paths": [traversal_path]})

    validate_traversal(docs_per_path)(docs)


def test_multiple_traversal_paths():
    # Build docs with nested chunks so root, chunk, and chunk-of-chunk
    # levels all carry text, then encode all three levels at once.
    sentences = list()
    sentences.append("Hello, my name is Michael.")
    sentences.append("Today we are going to Disney World.")
    sentences.append("There are animals on the road")
    sentences.append("A dog is running down the road")
    docs = DocumentArray([Document(text=sentence) for sentence in sentences])
    for index, sent in enumerate(sentences):
        docs[index].chunks.append(Document(text=sent))
        docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))

    encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
    encoder.encode(docs, {})
    for doc in docs:
        assert doc.embedding.shape == (768,)
        assert doc.chunks[0].embedding.shape == (768,)
        assert doc.chunks[0].chunks[0].embedding.shape == (768,)
"""Unit tests for ``TransformerTorchEncoder``."""
import os
from typing import Callable, List

import numpy as np
import pytest
import torch
from jina import Document, DocumentArray

from ...transform_encoder import TransformerTorchEncoder

# Directory containing this test file (handy for fixture/data paths).
cur_dir = os.path.dirname(os.path.abspath(__file__))


def test_compute_tokens():
    # Two sentences are tokenized into a single padded batch of shape (2, 7).
    enc = TransformerTorchEncoder()

    tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])

    assert tokens["input_ids"].shape == (2, 7)
    assert tokens["attention_mask"].shape == (2, 7)


@pytest.mark.parametrize(
    'hidden_seqlen', [4, 8]
)
def test_compute_embeddings(hidden_seqlen):
    # Pooling 7 layers of zero hidden states must produce one embedding per
    # input, regardless of the sequence length of the hidden states.
    embedding_size = 10
    enc = TransformerTorchEncoder()
    tokens = enc._generate_input_tokens(["hello world"])
    hidden_states = tuple(
        torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7)
    )

    embeddings = enc._compute_embedding(
        hidden_states=hidden_states, input_tokens=tokens
    )

    assert embeddings.shape == (1, embedding_size)


def test_encoding_cpu():
    enc = TransformerTorchEncoder(device="cpu")
    input_data = DocumentArray([Document(text="hello world")])

    enc.encode(docs=input_data, parameters={})

    assert input_data[0].embedding.shape == (768,)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
    enc = TransformerTorchEncoder(device="cuda")
    input_data = DocumentArray([Document(text="hello world")])

    enc.encode(docs=input_data, parameters={})

    assert input_data[0].embedding.shape == (768,)


def test_encodes_semantic_meaning():
    # Sentences C and D are semantically close; their embedding distance must
    # be smaller than the distance of every unrelated pair.
    sentences = dict()
    sentences["A"] = "Hello, my name is Michael."
    sentences["B"] = "Today we are going to Disney World."
    sentences["C"] = "There are animals on the road"
    sentences["D"] = "A dog is running down the road"

    encoder = TransformerTorchEncoder()
    embeddings = {}
    for id_, sentence in sentences.items():
        docs = DocumentArray([Document(text=sentence)])
        encoder.encode(docs, parameters={})
        embeddings[id_] = docs[0].embedding

    def dist(a, b):
        # Euclidean (L2) distance between two stored embeddings.
        a_embedding = embeddings[a]
        b_embedding = embeddings[b]
        return np.linalg.norm(a_embedding - b_embedding)

    small_distance = dist("C", "D")
    assert small_distance < dist("C", "B")
    assert small_distance < dist("C", "A")
    assert small_distance < dist("B", "A")


@pytest.mark.parametrize(
    ["docs", "docs_per_path", "traversal_path"],
    [
        (pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
        (
            pytest.lazy_fixture("docs_with_chunk_text"),
            [["r", 0], ["c", 10], ["cc", 0]],
            "c",
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_chunk_text"),
            [["r", 0], ["c", 0], ["cc", 10]],
            "cc",
        ),
    ],
)
def test_traversal_path(
    docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
    def validate_traversal(expected_docs_per_path: List[List[str]]):
        def validate(res):
            for path, count in expected_docs_per_path:
                embeddings = (
                    DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
                )
                for emb in embeddings:
                    if emb is None:
                        return False
                # NOTE(review): this `return` sits inside the outer loop, so
                # only the FIRST entry of `expected_docs_per_path` is ever
                # checked — confirm whether the remaining paths should be
                # validated as well.
                return len(embeddings) == count

        return validate

    encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
    encoder.encode(docs, {"traversal_paths": [traversal_path]})

    assert validate_traversal(docs_per_path)(docs)


def test_multiple_traversal_paths():
    sentences = list()
    sentences.append("Hello, my name is Michael.")
    sentences.append("Today we are going to Disney World.")
    sentences.append("There are animals on the road")
    sentences.append("A dog is running down the road")
    docs = DocumentArray([Document(text=sentence) for sentence in sentences])
    # Attach each sentence as a chunk, and the "mirrored" sentence as a
    # chunk-of-chunk, so that all three traversal levels carry text.
    for index, sent in enumerate(sentences):
        docs[index].chunks.append(Document(text=sent))
        docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))

    encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
    encoder.encode(docs, {})

    # Every level — root, chunk, chunk-of-chunk — must receive a 768-dim embedding.
    for doc in docs:
        assert doc.embedding.shape == (768,)
        assert doc.chunks[0].embedding.shape == (768,)
        assert doc.chunks[0].chunks[0].embedding.shape == (768,)
"""Algorithms for cross decomposition.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._pls import CCA, PLSSVD, PLSCanonical, PLSRegression __all__ = ["CCA", "PLSSVD", "PLSCanonical", "PLSRegression"]
"""Algorithms for cross decomposition.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._pls import CCA, PLSSVD, PLSCanonical, PLSRegression __all__ = ["PLSCanonical", "PLSRegression", "PLSSVD", "CCA"]
# Copyright (c) OpenMMLab. All rights reserved. from .det_data_sample import DetDataSample, OptSampleList, SampleList from .reid_data_sample import ReIDDataSample from .track_data_sample import (OptTrackSampleList, TrackDataSample, TrackSampleList) __all__ = [ 'DetDataSample', 'SampleList', 'OptSampleList', 'TrackDataSample', 'TrackSampleList', 'OptTrackSampleList', 'ReIDDataSample' ]
# Copyright (c) OpenMMLab. All rights reserved. from .det_data_sample import DetDataSample, OptSampleList, SampleList from .track_data_sample import (OptTrackSampleList, TrackDataSample, TrackSampleList) __all__ = [ 'DetDataSample', 'SampleList', 'OptSampleList', 'TrackDataSample', 'TrackSampleList', 'OptTrackSampleList' ]
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "3.2.0" from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled from .info import DatasetInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .utils import * from .utils import logging
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "3.1.1.dev0" from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled from .info import DatasetInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .utils import * from .utils import logging
"""Query-builder tests for ``QdrantDocumentIndex``."""
import numpy as np
import pytest
from pydantic import Field
from qdrant_client.http import models as rest

from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
from docarray.typing import NdArray
from tests.index.qdrant.fixtures import qdrant, qdrant_config  # noqa: F401


class SimpleDoc(BaseDoc):
    # NOTE(review): `dim=1000` disagrees with `NdArray[10]` — confirm which
    # dimensionality is intended.
    embedding: NdArray[10] = Field(dim=1000)  # type: ignore[valid-type]
    number: int
    text: str


class SimpleSchema(BaseDoc):
    embedding: NdArray[10] = Field(space='cosine')  # type: ignore[valid-type]
    number: int
    text: str


def test_find_uses_provided_vector(qdrant_config):  # noqa: F811
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    query = index.build_query().find(np.ones(10), 'embedding').build(7)  # type: ignore[attr-defined]

    assert query.vector_field == 'embedding'
    assert np.allclose(query.vector_query, np.ones(10))
    assert query.filter is None
    assert query.limit == 7


def test_multiple_find_returns_averaged_vector(qdrant_config):  # noqa: F811
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Two `find` calls on the same field are combined by averaging the vectors.
    query = (
        index.build_query()  # type: ignore[attr-defined]
        .find(np.ones(10), 'embedding')
        .find(np.zeros(10), 'embedding')
        .build(5)
    )

    assert query.vector_field == 'embedding'
    assert np.allclose(query.vector_query, np.array([0.5] * 10))
    assert query.filter is None
    assert query.limit == 5


def test_multiple_find_different_field_raises_error(qdrant_config):  # noqa: F811
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Mixing vector fields inside one query is not supported.
    with pytest.raises(ValueError):
        (
            index.build_query()  # type: ignore[attr-defined]
            .find(np.ones(10), 'embedding_1')
            .find(np.zeros(10), 'embedding_2')
        )


def test_filter_passes_qdrant_filter(qdrant_config):  # noqa: F811
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    qdrant_filter = rest.Filter(should=[rest.HasIdCondition(has_id=[1, 2, 3])])
    query = index.build_query().filter(qdrant_filter).build(11)  # type: ignore[attr-defined]

    assert query.vector_field is None
    assert query.vector_query is None
    # The user-supplied filter is wrapped in an outer `must` clause.
    assert query.filter == rest.Filter(must=[qdrant_filter])
    assert query.limit == 11


def test_text_search_creates_qdrant_filter(qdrant_config):  # noqa: F811
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Text search is translated into a full-text match condition on the field.
    query = index.build_query().text_search('lorem ipsum', 'text').build(3)  # type: ignore[attr-defined]

    assert query.vector_field is None
    assert query.vector_query is None
    assert isinstance(query.filter, rest.Filter)
    assert len(query.filter.must) == 1  # type: ignore[arg-type]
    assert isinstance(query.filter.must[0], rest.FieldCondition)  # type: ignore[index]
    assert query.filter.must[0].key == 'text'  # type: ignore[index]
    assert query.filter.must[0].match.text == 'lorem ipsum'  # type: ignore[index, union-attr]
    assert query.limit == 3


def test_query_builder_execute_query_find_text_search_filter(
    qdrant_config,  # noqa: F811
):
    # End-to-end: vector search + text search + range filter combined.
    index = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    index_docs = [
        SimpleDoc(
            embedding=np.ones(10),
            number=i,
            text=f'Lorem ipsum {i}',
        )
        for i in range(10, 30, 2)
    ]
    index.index(index_docs)

    find_query = np.ones(10)
    text_search_query = 'ipsum 1'
    filter_query = rest.Filter(
        must=[
            rest.FieldCondition(
                key='number',
                range=rest.Range(
                    gte=12,
                    lt=18,
                ),
            )
        ]
    )
    query = (
        index.build_query()  # type: ignore[attr-defined]
        .find(find_query, search_field='embedding')
        .text_search(text_search_query, search_field='text')
        .filter(filter_query)
        .build(limit=5)
    )

    docs = index.execute_query(query)
    # Only the documents with number in [12, 18) survive all clauses.
    assert len(docs) == 3
    assert all(x in docs.number for x in [12, 14, 16])
"""Query-builder tests for ``QdrantDocumentIndex``."""
import pytest
import numpy as np
from pydantic import Field

from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
from docarray.typing import NdArray
from qdrant_client.http import models as rest

from .fixtures import qdrant_config, qdrant


class SimpleDoc(BaseDoc):
    # NOTE(review): `dim=1000` disagrees with `NdArray[10]` — confirm which
    # dimensionality is intended.
    embedding: NdArray[10] = Field(dim=1000)  # type: ignore[valid-type]
    number: int
    text: str


class SimpleSchema(BaseDoc):
    embedding: NdArray[10] = Field(space='cosine')  # type: ignore[valid-type]
    number: int
    text: str


def test_find_uses_provided_vector(qdrant_config, qdrant):
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    query = store.build_query().find(np.ones(10), 'embedding').build(7)  # type: ignore[attr-defined]

    assert query.vector_field == 'embedding'
    assert np.allclose(query.vector_query, np.ones(10))
    assert query.filter is None
    assert query.limit == 7


def test_multiple_find_returns_averaged_vector(qdrant_config, qdrant):
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Two `find` calls on the same field are combined by averaging the vectors.
    query = (
        store.build_query()  # type: ignore[attr-defined]
        .find(np.ones(10), 'embedding')
        .find(np.zeros(10), 'embedding')
        .build(5)
    )

    assert query.vector_field == 'embedding'
    assert np.allclose(query.vector_query, np.array([0.5] * 10))
    assert query.filter is None
    assert query.limit == 5


def test_multiple_find_different_field_raises_error(qdrant_config, qdrant):
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Mixing vector fields inside one query is not supported.
    with pytest.raises(ValueError):
        (
            store.build_query()  # type: ignore[attr-defined]
            .find(np.ones(10), 'embedding_1')
            .find(np.zeros(10), 'embedding_2')
        )


def test_filter_passes_qdrant_filter(qdrant_config, qdrant):
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    qdrant_filter = rest.Filter(should=[rest.HasIdCondition(has_id=[1, 2, 3])])
    query = store.build_query().filter(qdrant_filter).build(11)  # type: ignore[attr-defined]

    assert query.vector_field is None
    assert query.vector_query is None
    # The user-supplied filter is wrapped in an outer `must` clause.
    assert query.filter == rest.Filter(must=[qdrant_filter])
    assert query.limit == 11


def test_text_search_creates_qdrant_filter(qdrant_config, qdrant):
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    # Text search is translated into a full-text match condition on the field.
    query = store.build_query().text_search('lorem ipsum', 'text').build(3)  # type: ignore[attr-defined]

    assert query.vector_field is None
    assert query.vector_query is None
    assert isinstance(query.filter, rest.Filter)
    assert len(query.filter.must) == 1  # type: ignore[arg-type]
    assert isinstance(query.filter.must[0], rest.FieldCondition)  # type: ignore[index]
    assert query.filter.must[0].key == 'text'  # type: ignore[index]
    assert query.filter.must[0].match.text == 'lorem ipsum'  # type: ignore[index, union-attr]
    assert query.limit == 3


def test_query_builder_execute_query_find_text_search_filter(qdrant_config, qdrant):
    # End-to-end: vector search + text search + range filter combined.
    store = QdrantDocumentIndex[SimpleSchema](db_config=qdrant_config)

    index_docs = [
        SimpleDoc(
            embedding=np.ones(10),
            number=i,
            text=f'Lorem ipsum {i}',
        )
        for i in range(10, 30, 2)
    ]
    store.index(index_docs)

    find_query = np.ones(10)
    text_search_query = 'ipsum 1'
    filter_query = rest.Filter(
        must=[
            rest.FieldCondition(
                key='number',
                range=rest.Range(
                    gte=12,
                    lt=18,
                ),
            )
        ]
    )
    query = (
        store.build_query()  # type: ignore[attr-defined]
        .find(find_query, search_field='embedding')
        .text_search(text_search_query, search_field='text')
        .filter(filter_query)
        .build(limit=5)
    )

    docs = store.execute_query(query)
    # Only the documents with number in [12, 18) survive all clauses.
    assert len(docs) == 3
    assert all(x in docs.number for x in [12, 14, 16])
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: docarray.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe7\x01\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12+\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12\x36\n\x0e\x64ocument_array\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x0e\n\x04type\x18\x06 \x01(\tH\x01\x42\t\n\x07\x63ontentB\x0f\n\rdocarray_type\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 
\x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3' ) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _DOCUMENTPROTO_DATAENTRY._options = None _DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001' _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001' _DENSENDARRAYPROTO._serialized_start = 58 _DENSENDARRAYPROTO._serialized_end = 123 _NDARRAYPROTO._serialized_start = 125 _NDARRAYPROTO._serialized_end = 228 _NODEPROTO._serialized_start = 231 _NODEPROTO._serialized_end = 462 _DOCUMENTPROTO._serialized_start = 465 _DOCUMENTPROTO._serialized_end = 595 _DOCUMENTPROTO_DATAENTRY._serialized_start = 531 _DOCUMENTPROTO_DATAENTRY._serialized_end = 595 _DOCUMENTARRAYPROTO._serialized_start = 597 _DOCUMENTARRAYPROTO._serialized_end = 656 _UNIONARRAYPROTO._serialized_start = 659 _UNIONARRAYPROTO._serialized_end = 793 _DOCUMENTARRAYSTACKEDPROTO._serialized_start = 796 _DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1010 _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 937 _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1010 # @@protoc_insertion_point(module_scope)
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: docarray.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x8a\x05\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x12\n\x08mesh_url\x18\x0c \x01(\tH\x00\x12\x19\n\x0fpoint_cloud_url\x18\r \x01(\tH\x00\x12\x13\n\taudio_url\x18\x0e \x01(\tH\x00\x12/\n\raudio_ndarray\x18\x0f \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x34\n\x12\x61udio_torch_tensor\x18\x10 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x13\n\tvideo_url\x18\x11 \x01(\tH\x00\x12/\n\rvideo_ndarray\x18\x12 
\x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x34\n\x12video_torch_tensor\x18\x13 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3' ) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _DOCUMENTPROTO_DATAENTRY._options = None _DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001' _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001' _DENSENDARRAYPROTO._serialized_start = 58 _DENSENDARRAYPROTO._serialized_end = 123 _NDARRAYPROTO._serialized_start = 125 _NDARRAYPROTO._serialized_end = 228 _NODEPROTO._serialized_start = 231 _NODEPROTO._serialized_end = 881 _DOCUMENTPROTO._serialized_start = 884 _DOCUMENTPROTO._serialized_end = 1014 _DOCUMENTPROTO_DATAENTRY._serialized_start = 950 _DOCUMENTPROTO_DATAENTRY._serialized_end = 1014 _DOCUMENTARRAYPROTO._serialized_start = 1016 _DOCUMENTARRAYPROTO._serialized_end = 1075 
_UNIONARRAYPROTO._serialized_start = 1078 _UNIONARRAYPROTO._serialized_end = 1212 _DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1215 _DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1429 _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1356 _DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1429 # @@protoc_insertion_point(module_scope)
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmengine.dist import get_world_size from mmengine.logging import print_log from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class RTMDet(SingleStageDetector): """Implementation of RTMDet. Args: backbone (:obj:`ConfigDict` or dict): The backbone module. neck (:obj:`ConfigDict` or dict): The neck module. bbox_head (:obj:`ConfigDict` or dict): The bbox head module. train_cfg (:obj:`ConfigDict` or dict, optional): The training config of ATSS. Defaults to None. test_cfg (:obj:`ConfigDict` or dict, optional): The testing config of ATSS. Defaults to None. data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of :class:`DetDataPreprocessor` to process the input data. Defaults to None. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. Defaults to None. use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True. """ def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, data_preprocessor: OptConfigType = None, init_cfg: OptMultiConfig = None, use_syncbn: bool = True) -> None: super().__init__( backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg) # TODO: Waiting for mmengine support if use_syncbn and get_world_size() > 1: torch.nn.SyncBatchNorm.convert_sync_batchnorm(self) print_log('Using SyncBatchNorm()', 'current')
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmengine.dist import get_world_size from mmengine.logging import print_log from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class RTMDet(SingleStageDetector): """Implementation of RTMDet. Args: backbone (:obj:`ConfigDict` or dict): The backbone module. neck (:obj:`ConfigDict` or dict): The neck module. bbox_head (:obj:`ConfigDict` or dict): The bbox head module. train_cfg (:obj:`ConfigDict` or dict, optional): The training config of ATSS. Defaults to None. test_cfg (:obj:`ConfigDict` or dict, optional): The testing config of ATSS. Defaults to None. data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of :class:`DetDataPreprocessor` to process the input data. Defaults to None. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. Defaults to None. use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True. """ def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, data_preprocessor: OptConfigType = None, init_cfg: OptMultiConfig = None, use_syncbn: bool = True) -> None: super().__init__( backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg) # TODO: Waiting for mmengine support if use_syncbn and get_world_size() > 1: torch.nn.SyncBatchNorm.convert_sync_batchnorm(self) print_log('Using SyncBatchNorm()', 'current')
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(CosineSimilarityLoss): def __init__( self, model: SparseEncoder, loss_fct: nn.Module = nn.MSELoss(), cos_score_transformation: nn.Module = nn.Identity(), ) -> None: """ SparseCosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two. By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``. Args: model: SparseEncoder model loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label? By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2`` cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identify function is used (i.e. no change). Requirements: - Need to be used in SpladeLoss or CSRLoss as a loss function. - Sentence pairs with corresponding similarity scores in range `[0, 1]` Inputs: +--------------------------------+------------------------+ | Texts | Labels | +================================+========================+ | (sentence_A, sentence_B) pairs | float similarity score | +--------------------------------+------------------------+ Relations: - :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. 
Example: :: from datasets import Dataset from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses model = SparseEncoder("distilbert/distilbert-base-uncased") train_dataset = Dataset.from_dict( { "sentence1": ["It's nice weather outside today.", "He drove to work."], "sentence2": ["It's so sunny.", "She walked to the store."], "score": [1.0, 0.3], } ) loss = losses.SpladeLoss( model=model, loss=losses.SparseCosineSimilarityLoss(model), document_regularizer_weight=5e-5, use_document_regularizer_only=True, ) trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss) trainer.train() """ model.similarity_fn_name = "cosine" return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation) def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor: raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(CosineSimilarityLoss): def __init__( self, model: SparseEncoder, loss_fct: nn.Module = nn.MSELoss(), cos_score_transformation: nn.Module = nn.Identity(), ) -> None: """ SparseCosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two. By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``. Args: model: SparseEncoder model loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label? By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2`` cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identify function is used (i.e. no change). Requirements: - Need to be used in SpladeLoss or CSRLoss as a loss function. - Sentence pairs with corresponding similarity scores in range `[0, 1]` Inputs: +--------------------------------+------------------------+ | Texts | Labels | +================================+========================+ | (sentence_A, sentence_B) pairs | float similarity score | +--------------------------------+------------------------+ Relations: - :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. 
Example: :: from datasets import Dataset from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses model = SparseEncoder("distilbert/distilbert-base-uncased") train_dataset = Dataset.from_dict( { "sentence1": ["It's nice weather outside today.", "He drove to work."], "sentence2": ["It's so sunny.", "She walked to the store."], "score": [1.0, 0.3], } ) loss = losses.SpladeLoss( model=model, loss=losses.SparseCosineSimilarityLoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True, ) trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss) trainer.train() """ model.similarity_fn_name = "cosine" return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation) def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor: raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
"""Tools for model selection, such as cross validation and hyper-parameter tuning.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import typing from ._classification_threshold import ( FixedThresholdClassifier, TunedThresholdClassifierCV, ) from ._plot import LearningCurveDisplay, ValidationCurveDisplay from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV from ._split import ( BaseCrossValidator, BaseShuffleSplit, GroupKFold, GroupShuffleSplit, KFold, LeaveOneGroupOut, LeaveOneOut, LeavePGroupsOut, LeavePOut, PredefinedSplit, RepeatedKFold, RepeatedStratifiedKFold, ShuffleSplit, StratifiedGroupKFold, StratifiedKFold, StratifiedShuffleSplit, TimeSeriesSplit, check_cv, train_test_split, ) from ._validation import ( cross_val_predict, cross_val_score, cross_validate, learning_curve, permutation_test_score, validation_curve, ) if typing.TYPE_CHECKING: # Avoid errors in type checkers (e.g. mypy) for experimental estimators. # TODO: remove this check once the estimator is no longer experimental. from ._search_successive_halving import ( # noqa: F401 HalvingGridSearchCV, HalvingRandomSearchCV, ) __all__ = [ "BaseCrossValidator", "BaseShuffleSplit", "FixedThresholdClassifier", "GridSearchCV", "GroupKFold", "GroupShuffleSplit", "KFold", "LearningCurveDisplay", "LeaveOneGroupOut", "LeaveOneOut", "LeavePGroupsOut", "LeavePOut", "ParameterGrid", "ParameterSampler", "PredefinedSplit", "RandomizedSearchCV", "RepeatedKFold", "RepeatedStratifiedKFold", "ShuffleSplit", "StratifiedGroupKFold", "StratifiedKFold", "StratifiedShuffleSplit", "TimeSeriesSplit", "TunedThresholdClassifierCV", "ValidationCurveDisplay", "check_cv", "cross_val_predict", "cross_val_score", "cross_validate", "learning_curve", "permutation_test_score", "train_test_split", "validation_curve", ] # TODO: remove this check once the estimator is no longer experimental. 
def __getattr__(name):
    """Module-level fallback lookup (PEP 562).

    The successive-halving estimators are experimental: importing them
    directly from this module must fail with guidance on how to enable
    them, while any other unknown attribute raises a plain AttributeError.
    """
    experimental = ("HalvingGridSearchCV", "HalvingRandomSearchCV")
    if name not in experimental:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    raise ImportError(
        f"{name} is experimental and the API might change without any "
        "deprecation cycle. To use it, you need to explicitly import "
        "enable_halving_search_cv:\n"
        "from sklearn.experimental import enable_halving_search_cv"
    )
"""Tools for model selection, such as cross validation and hyper-parameter tuning.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import typing from ._classification_threshold import ( FixedThresholdClassifier, TunedThresholdClassifierCV, ) from ._plot import LearningCurveDisplay, ValidationCurveDisplay from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV from ._split import ( BaseCrossValidator, BaseShuffleSplit, GroupKFold, GroupShuffleSplit, KFold, LeaveOneGroupOut, LeaveOneOut, LeavePGroupsOut, LeavePOut, PredefinedSplit, RepeatedKFold, RepeatedStratifiedKFold, ShuffleSplit, StratifiedGroupKFold, StratifiedKFold, StratifiedShuffleSplit, TimeSeriesSplit, check_cv, train_test_split, ) from ._validation import ( cross_val_predict, cross_val_score, cross_validate, learning_curve, permutation_test_score, validation_curve, ) if typing.TYPE_CHECKING: # Avoid errors in type checkers (e.g. mypy) for experimental estimators. # TODO: remove this check once the estimator is no longer experimental. from ._search_successive_halving import ( # noqa HalvingGridSearchCV, HalvingRandomSearchCV, ) __all__ = [ "BaseCrossValidator", "BaseShuffleSplit", "FixedThresholdClassifier", "GridSearchCV", "GroupKFold", "GroupShuffleSplit", "KFold", "LearningCurveDisplay", "LeaveOneGroupOut", "LeaveOneOut", "LeavePGroupsOut", "LeavePOut", "ParameterGrid", "ParameterSampler", "PredefinedSplit", "RandomizedSearchCV", "RepeatedKFold", "RepeatedStratifiedKFold", "ShuffleSplit", "StratifiedGroupKFold", "StratifiedKFold", "StratifiedShuffleSplit", "TimeSeriesSplit", "TunedThresholdClassifierCV", "ValidationCurveDisplay", "check_cv", "cross_val_predict", "cross_val_score", "cross_validate", "learning_curve", "permutation_test_score", "train_test_split", "validation_curve", ] # TODO: remove this check once the estimator is no longer experimental. 
def __getattr__(name):
    # Module-level fallback (PEP 562): only invoked when a normal attribute
    # lookup on this module fails.
    if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
        # The halving search estimators are experimental; point the user at
        # the explicit opt-in import instead of a bare AttributeError.
        raise ImportError(
            f"{name} is experimental and the API might change without any "
            "deprecation cycle. To use it, you need to explicitly import "
            "enable_halving_search_cv:\n"
            "from sklearn.experimental import enable_halving_search_cv"
        )
    raise AttributeError(f"module {__name__} has no attribute {name}")
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch

import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel

from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
                                     is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS


def mock(*args, **kwargs):
    # No-op stand-in used to neutralize torch.distributed collectives so DDP
    # wrappers can be constructed without an initialized process group.
    pass


@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
    """``is_model_wrapper`` must detect the MM wrappers, the native torch
    wrappers, and any wrapper registered in ``MODEL_WRAPPERS`` — but not a
    plain ``nn.Module``."""

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)

        def forward(self, x):
            return self.conv(x)

    # _verify_model_across_ranks is added in torch1.9.0 so we should check
    # whether _verify_model_across_ranks is the member of torch.distributed
    # before mocking
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock

    model = Model()
    assert not is_model_wrapper(model)

    mmdp = MMDataParallel(model)
    assert is_model_wrapper(mmdp)

    # process_group is mocked out; the patched decorators above skip the real
    # DDP initialization.
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_model_wrapper(mmddp)

    torch_dp = DataParallel(model)
    assert is_model_wrapper(torch_dp)

    torch_ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_model_wrapper(torch_ddp)

    # test model wrapper registry: anything registered in MODEL_WRAPPERS
    # should also be treated as a wrapper.
    @MODEL_WRAPPERS.register_module()
    class ModelWrapper:

        def __init__(self, module):
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    model_wrapper = ModelWrapper(model)
    assert is_model_wrapper(model_wrapper)


class TestMMDataParallel(TestCase):

    def setUp(self):
        """Setup the demo image in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """

        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

            def train_step(self, x):
                return self.forward(x)

            def val_step(self, x):
                return self.forward(x)

        # self.model has train_step/val_step; the per-test local Model below
        # deliberately lacks them.
        self.model = Model()

    def test_train_step(self):
        # A module without a train_step attribute must be rejected by
        # MMDataParallel.train_step.
        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

        model = Model()
        mmdp = MMDataParallel(model)

        # test without train_step attribute
        with pytest.raises(AssertionError):
            mmdp.train_step(torch.zeros([1, 1, 3, 3]))

        out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
        assert out.shape == (1, 2, 3, 3)

    def test_val_step(self):
        # Same contract as test_train_step, but for val_step.
        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

        model = Model()
        mmdp = MMDataParallel(model)

        # test without val_step attribute
        with pytest.raises(AssertionError):
            mmdp.val_step(torch.zeros([1, 1, 3, 3]))

        out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
        assert out.shape == (1, 2, 3, 3)
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch

import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel

from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
                                     is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS


def mock(*args, **kwargs):
    # No-op stand-in used to neutralize torch.distributed collectives so DDP
    # wrappers can be constructed without an initialized process group.
    pass


@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
    """``is_model_wrapper`` must detect the MM wrappers, the native torch
    wrappers, and any wrapper registered in ``MODEL_WRAPPERS`` — but not a
    plain ``nn.Module``."""

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)

        def forward(self, x):
            return self.conv(x)

    # _verify_model_across_ranks is added in torch1.9.0 so we should check
    # whether _verify_model_across_ranks is the member of torch.distributed
    # before mocking
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock

    model = Model()
    assert not is_model_wrapper(model)

    mmdp = MMDataParallel(model)
    assert is_model_wrapper(mmdp)

    # process_group is mocked out; the patched decorators above skip the real
    # DDP initialization.
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_model_wrapper(mmddp)

    torch_dp = DataParallel(model)
    assert is_model_wrapper(torch_dp)

    torch_ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_model_wrapper(torch_ddp)

    # test model wrapper registry: anything registered in MODEL_WRAPPERS
    # should also be treated as a wrapper.
    # NOTE: dropped the redundant Python-2 style `(object)` base class; all
    # classes are new-style in Python 3.
    @MODEL_WRAPPERS.register_module()
    class ModelWrapper:

        def __init__(self, module):
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    model_wrapper = ModelWrapper(model)
    assert is_model_wrapper(model_wrapper)


class TestMMDataParallel(TestCase):

    def setUp(self):
        """Setup the demo image in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """

        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

            def train_step(self, x):
                return self.forward(x)

            def val_step(self, x):
                return self.forward(x)

        # self.model has train_step/val_step; the per-test local Model below
        # deliberately lacks them.
        self.model = Model()

    def test_train_step(self):
        # A module without a train_step attribute must be rejected by
        # MMDataParallel.train_step.
        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

        model = Model()
        mmdp = MMDataParallel(model)

        # test without train_step attribute
        with pytest.raises(AssertionError):
            mmdp.train_step(torch.zeros([1, 1, 3, 3]))

        out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
        assert out.shape == (1, 2, 3, 3)

    def test_val_step(self):
        # Same contract as test_train_step, but for val_step.
        class Model(nn.Module):

            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(1, 2, 1)

            def forward(self, x):
                return self.conv(x)

        model = Model()
        mmdp = MMDataParallel(model)

        # test without val_step attribute
        with pytest.raises(AssertionError):
            mmdp.val_step(torch.zeros([1, 1, 3, 3]))

        out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
        assert out.shape == (1, 2, 3, 3)
"""DataForSeo API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.dataforseo_api_search.tool import ( DataForSeoAPISearchResults, DataForSeoAPISearchRun, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool", "DataForSeoAPISearchResults": ( "langchain_community.tools.dataforseo_api_search.tool" ), } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "DataForSeoAPISearchResults", "DataForSeoAPISearchRun", ]
"""DataForSeo API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.dataforseo_api_search.tool import ( DataForSeoAPISearchResults, DataForSeoAPISearchRun, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool", "DataForSeoAPISearchResults": ( "langchain_community.tools.dataforseo_api_search.tool" ), } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "DataForSeoAPISearchRun", "DataForSeoAPISearchResults", ]
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union

import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset

from torchaudio.datasets.utils import _load_waveform, extract_archive

URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_RATE = 8000  # all QUESST14 audio is 8 kHz
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
    "albanian",
    "basque",
    "czech",
    "nnenglish",
    "romanian",
    "slovak",
]


class QUESST14(Dataset):
    """*QUESST14* :cite:`Mir2015QUESST2014EQ` dataset.

    Args:
        root (str or Path): Root directory where the dataset's top level directory is found
        subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
        language (str or None, optional): Language to get dataset for.
            Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
            If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
        download (bool, optional): Whether to download the dataset if it is not found at root path.
            (default: ``False``)
    """

    def __init__(
        self,
        root: Union[str, Path],
        subset: str,
        language: Optional[str] = "nnenglish",
        download: bool = False,
    ) -> None:
        if subset not in ["docs", "dev", "eval"]:
            raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")

        if language is not None and language not in _LANGUAGES:
            raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")

        # Get string representation of 'root'
        root = os.fspath(root)

        basename = os.path.basename(URL)
        archive = os.path.join(root, basename)

        # Strip the archive extension to get the extracted directory name.
        basename = basename.rsplit(".", 2)[0]
        self._path = os.path.join(root, basename)

        # Download/extract only when the extracted directory is not present.
        if not os.path.isdir(self._path):
            if not os.path.isfile(archive):
                if not download:
                    raise RuntimeError("Dataset not found. Please use `download=True` to download")
                download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
            extract_archive(archive, root)

        # Each subset is driven by a different scoring list file.
        if subset == "docs":
            self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
        elif subset == "dev":
            self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
        elif subset == "eval":
            self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")

    def get_metadata(self, n: int) -> Tuple[str, int, str]:
        """Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
        but otherwise returns the same fields as :py:func:`__getitem__`.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            Tuple of the following items;

            str:
                Path to audio
            int:
                Sample rate
            str:
                File name
        """
        audio_path = self.data[n]
        relpath = os.path.relpath(audio_path, self._path)
        # File name without its extension (audio_path is a pathlib.Path).
        return relpath, SAMPLE_RATE, audio_path.with_suffix("").name

    def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
        """Load the n-th sample from the dataset.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            Tuple of the following items;

            Tensor:
                Waveform
            int:
                Sample rate
            str:
                File name
        """
        metadata = self.get_metadata(n)
        waveform = _load_waveform(self._path, metadata[0], metadata[1])
        return (waveform,) + metadata[1:]

    def __len__(self) -> int:
        return len(self.data)


def filter_audio_paths(
    path: str,
    language: str,
    lst_name: str,
):
    """Extract audio paths for the given language."""
    audio_paths = []

    path = Path(path)
    with open(path / "scoring" / lst_name) as f:
        for line in f:
            audio_path, lang = line.strip().split()
            if language is not None and lang != language:
                continue
            # Drop the leading directory component recorded in the list file;
            # paths are re-rooted under `path`.
            audio_path = re.sub(r"^.*?\/", "", audio_path)
            audio_paths.append(path / audio_path)

    return audio_paths
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union

import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset

from torchaudio.datasets.utils import _load_waveform, extract_archive

URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_RATE = 8000  # all QUESST14 audio is 8 kHz
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
    "albanian",
    "basque",
    "czech",
    "nnenglish",
    "romanian",
    "slovak",
]


class QUESST14(Dataset):
    """Create *QUESST14* :cite:`Mir2015QUESST2014EQ` Dataset

    Args:
        root (str or Path): Root directory where the dataset's top level directory is found
        subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
        language (str or None, optional): Language to get dataset for.
            Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
            If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
        download (bool, optional): Whether to download the dataset if it is not found at root path.
            (default: ``False``)
    """

    def __init__(
        self,
        root: Union[str, Path],
        subset: str,
        language: Optional[str] = "nnenglish",
        download: bool = False,
    ) -> None:
        if subset not in ["docs", "dev", "eval"]:
            raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")

        if language is not None and language not in _LANGUAGES:
            raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")

        # Get string representation of 'root'
        root = os.fspath(root)

        basename = os.path.basename(URL)
        archive = os.path.join(root, basename)

        # Strip the archive extension to get the extracted directory name.
        basename = basename.rsplit(".", 2)[0]
        self._path = os.path.join(root, basename)

        # Download/extract only when the extracted directory is not present.
        if not os.path.isdir(self._path):
            if not os.path.isfile(archive):
                if not download:
                    raise RuntimeError("Dataset not found. Please use `download=True` to download")
                download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
            extract_archive(archive, root)

        # Each subset is driven by a different scoring list file.
        if subset == "docs":
            self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
        elif subset == "dev":
            self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
        elif subset == "eval":
            self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")

    def get_metadata(self, n: int) -> Tuple[str, int, str]:
        """Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
        but otherwise returns the same fields as :py:func:`__getitem__`.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            (str, int, str): ``(filepath, sample_rate, file_name)``
        """
        audio_path = self.data[n]
        relpath = os.path.relpath(audio_path, self._path)
        # File name without its extension (audio_path is a pathlib.Path).
        return relpath, SAMPLE_RATE, audio_path.with_suffix("").name

    def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
        """Load the n-th sample from the dataset.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            (Tensor, int, str): ``(waveform, sample_rate, file_name)``
        """
        metadata = self.get_metadata(n)
        waveform = _load_waveform(self._path, metadata[0], metadata[1])
        return (waveform,) + metadata[1:]

    def __len__(self) -> int:
        return len(self.data)


def filter_audio_paths(
    path: str,
    language: str,
    lst_name: str,
):
    """Extract audio paths for the given language."""
    audio_paths = []

    path = Path(path)
    with open(path / "scoring" / lst_name) as f:
        for line in f:
            audio_path, lang = line.strip().split()
            if language is not None and lang != language:
                continue
            # Drop the leading directory component recorded in the list file;
            # paths are re-rooted under `path`.
            audio_path = re.sub(r"^.*?\/", "", audio_path)
            audio_paths.append(path / audio_path)

    return audio_paths
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional, utils # usort: skip from ._transform import Transform # usort: skip from ._augment import RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide from ._color import ( ColorJitter, Grayscale, RandomAdjustSharpness, RandomAutocontrast, RandomEqualize, RandomGrayscale, RandomInvert, RandomPhotometricDistort, RandomPosterize, RandomSolarize, ) from ._container import Compose, RandomApply, RandomChoice, RandomOrder from ._geometry import ( CenterCrop, ElasticTransform, FiveCrop, Pad, RandomAffine, RandomCrop, RandomHorizontalFlip, RandomIoUCrop, RandomPerspective, RandomResize, RandomResizedCrop, RandomRotation, RandomShortestSize, RandomVerticalFlip, RandomZoomOut, Resize, ScaleJitter, TenCrop, ) from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBox, ToDtype from ._temporal import UniformTemporalSubsample from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage from ._deprecated import ToTensor # usort: skip from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS if _WARN_ABOUT_BETA_TRANSFORMS: import warnings warnings.warn(_BETA_TRANSFORMS_WARNING)
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional, utils # usort: skip from ._transform import Transform # usort: skip from ._augment import RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide from ._color import ( ColorJitter, Grayscale, RandomAdjustSharpness, RandomAutocontrast, RandomEqualize, RandomGrayscale, RandomInvert, RandomPhotometricDistort, RandomPosterize, RandomSolarize, ) from ._container import Compose, RandomApply, RandomChoice, RandomOrder from ._geometry import ( CenterCrop, ElasticTransform, FiveCrop, Pad, RandomAffine, RandomCrop, RandomHorizontalFlip, RandomIoUCrop, RandomPerspective, RandomResize, RandomResizedCrop, RandomRotation, RandomShortestSize, RandomVerticalFlip, RandomZoomOut, Resize, ScaleJitter, TenCrop, ) from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBoxes, ToDtype from ._temporal import UniformTemporalSubsample from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage from ._deprecated import ToTensor # usort: skip from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS if _WARN_ABOUT_BETA_TRANSFORMS: import warnings warnings.warn(_BETA_TRANSFORMS_WARNING)
"""Chain-of-Abstraction Output Parser.""" import asyncio import json import re from collections import defaultdict from typing import Dict, Tuple import networkx as nx from llama_index.core.tools import AsyncBaseTool, ToolOutput from llama_index.core.types import BaseOutputParser class ChainOfAbstractionParser(BaseOutputParser): """ Chain of abstraction output parser. This parser is used to parse the output using the default prompt defined in prompts.py. If the prompt formatting changes the function format, this parser will not work and should be updated. """ def __init__(self, verbose: bool = False): """Init params.""" self._verbose = verbose def parse( self, solution: str, tools_by_name: Dict[str, AsyncBaseTool] ) -> Tuple[str, int]: return asyncio.run(self.aparse(solution, tools_by_name)) async def aparse( self, solution: str, tools_by_name: Dict[str, AsyncBaseTool] ) -> Tuple[str, int]: # Extract function calls and placeholders func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution) placeholders = set() for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution): placeholders.add(match.group(3)) # Create a dependency graph graph = nx.DiGraph() for func_name, inputs, output in func_calls: parsed_inputs = [] if inputs.strip(): # Ensure inputs string is not empty input_parts = [part.strip() for part in inputs.split(",")] for part in input_parts: try: # Try to parse as a JSON literal (e.g., number, bool) parsed_inputs.append(json.loads(part)) except json.JSONDecodeError: # If it fails, treat it as a bare string/placeholder parsed_inputs.append(part) graph.add_node(output, func_name=func_name, inputs=parsed_inputs) for inp in parsed_inputs: # Add an edge only if the input is a placeholder from a previous step if isinstance(inp, str) and inp in placeholders: graph.add_edge(inp, output) # Find the execution levels execution_levels = defaultdict(list) for node in nx.topological_sort(graph): level = ( max( [execution_levels[pred] for pred in 
graph.predecessors(node)], default=-1, ) + 1 ) execution_levels[node] = level # Group nodes by execution level level_groups = defaultdict(list) for node, level in execution_levels.items(): level_groups[level].append(node) # Execute functions and replace placeholders results = {} tool_outputs = [] graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)} for level in sorted(level_groups.keys()): level_nodes = level_groups[level] parallel_results = {} for placeholder in level_nodes: if len(graph_nodes[placeholder]) == 0: continue # get function name and inputs func_name, inputs = ( graph_nodes[placeholder]["func_name"], graph_nodes[placeholder]["inputs"], ) # loop up any inputs that depend on other functions input_values = [results.get(inp, inp) for inp in inputs] if self._verbose: print( f"==== Executing {func_name} with inputs {input_values} ====", flush=True, ) # execute function and store result try: tool_output = await tools_by_name[func_name].acall(*input_values) tool_outputs.append(tool_output) except Exception as e: tool_outputs.append( ToolOutput( content=str(e), tool_name=func_name, raw_output=None, raw_input={"args": input_values}, is_error=True, ) ) # If an error occurs, stop execution break parallel_results[placeholder] = tool_output.raw_output results.update(parallel_results) # Replace placeholders in the solution text for placeholder, value in results.items(): solution = solution.replace(f"{placeholder}", '"' + str(value) + '"') return solution, tool_outputs
"""Chain-of-Abstraction Output Parser.""" import asyncio import json import networkx as nx import re from collections import defaultdict from typing import Dict, Tuple from llama_index.core.tools import AsyncBaseTool, ToolOutput from llama_index.core.types import BaseOutputParser class ChainOfAbstractionParser(BaseOutputParser): """ Chain of abstraction output parser. This parser is used to parse the output using the default prompt defined in prompts.py. If the prompt formatting changes the function format, this parser will not work and should be updated. """ def __init__(self, verbose: bool = False): """Init params.""" self._verbose = verbose def parse( self, solution: str, tools_by_name: Dict[str, AsyncBaseTool] ) -> Tuple[str, int]: return asyncio.run(self.aparse(solution, tools_by_name)) async def aparse( self, solution: str, tools_by_name: Dict[str, AsyncBaseTool] ) -> Tuple[str, int]: # Extract function calls and placeholders func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution) placeholders = set() for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution): placeholders.add(match.group(3)) # Create a dependency graph graph = nx.DiGraph() for func_name, inputs, output in func_calls: inputs = json.loads("[" + inputs + "]") graph.add_node(output, func_name=func_name, inputs=inputs) for inp in inputs: graph.add_edge(inp, output) # Find the execution levels execution_levels = defaultdict(list) for node in nx.topological_sort(graph): level = ( max( [execution_levels[pred] for pred in graph.predecessors(node)], default=-1, ) + 1 ) execution_levels[node] = level # Group nodes by execution level level_groups = defaultdict(list) for node, level in execution_levels.items(): level_groups[level].append(node) # Execute functions and replace placeholders results = {} tool_outputs = [] graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)} for level in sorted(level_groups.keys()): level_nodes = level_groups[level] parallel_results = 
{} for placeholder in level_nodes: if len(graph_nodes[placeholder]) == 0: continue # get function name and inputs func_name, inputs = ( graph_nodes[placeholder]["func_name"], graph_nodes[placeholder]["inputs"], ) # loop up any inputs that depend on other functions breakpoint() input_values = [results.get(inp, inp) for inp in inputs] if self._verbose: print( f"==== Executing {func_name} with inputs {input_values} ====", flush=True, ) # execute function and store result try: tool_output = await tools_by_name[func_name].acall(*input_values) tool_outputs.append(tool_output) except Exception as e: tool_outputs.append( ToolOutput( content=str(e), tool_name=func_name, raw_output=None, raw_input={"args": input_values}, is_error=True, ) ) # If an error occurs, stop execution break parallel_results[placeholder] = tool_output.raw_output results.update(parallel_results) # Replace placeholders in the solution text for placeholder, value in results.items(): solution = solution.replace(f"{placeholder}", '"' + str(value) + '"') return solution, tool_outputs
# Back-compat shim: re-export HTTPGateway at this package level so callers can
# import it without knowing the concrete module layout.
from jina.serve.runtimes.gateway.http.gateway import HTTPGateway

__all__ = ['HTTPGateway']
# Back-compat shim: re-export HTTPGateway at this package level so callers can
# import it without knowing the concrete module layout.
from .gateway import HTTPGateway

__all__ = ['HTTPGateway']
# Consolidated the duplicated function-local `unittest.mock` imports into a
# single module-level import (stdlib imports belong at the top of the file).
from unittest.mock import Mock, patch

from llama_index.llms.vertex import Vertex


def test_vertex_metadata_function_calling():
    """Test that Vertex LLM metadata correctly identifies Gemini models as function calling models."""
    # Mock the Gemini client factory to avoid actual API calls.
    with patch(
        "llama_index.llms.vertex.gemini_utils.create_gemini_client"
    ) as mock_create_client:
        # Test Gemini model
        mock_client = Mock()
        mock_create_client.return_value = mock_client

        llm = Vertex(model="gemini-pro", project="test-project")
        metadata = llm.metadata

        assert metadata.is_function_calling_model is True
        assert metadata.model_name == "gemini-pro"
        assert metadata.is_chat_model is True


def test_vertex_metadata_non_function_calling():
    """Test that Vertex LLM metadata correctly identifies non-Gemini models as non-function calling models."""
    # Mock the PaLM chat model loader to avoid actual API calls.
    with patch(
        "vertexai.language_models.ChatModel.from_pretrained"
    ) as mock_from_pretrained:
        mock_chat_client = Mock()
        mock_from_pretrained.return_value = mock_chat_client

        llm = Vertex(model="chat-bison")
        metadata = llm.metadata

        assert metadata.is_function_calling_model is False
        assert metadata.model_name == "chat-bison"
        assert metadata.is_chat_model is True
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.vertex import Vertex


def test_embedding_class():
    """Vertex must sit on the BaseLLM inheritance chain."""
    mro_names = [cls.__name__ for cls in Vertex.__mro__]
    assert BaseLLM.__name__ in mro_names
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings as _warnings import docarray as _docarray if _sys.version_info < (3, 7, 0): raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}') def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs): return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % ( category.__name__, message, filename, lineno, ) _warnings.formatwarning = _warning_on_one_line _warnings.simplefilter('always', DeprecationWarning) # fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start _os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES' # JINA_MP_START_METHOD has higher priority than os-patch _start_method = _os.environ.get('JINA_MP_START_METHOD', None) if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}: from multiprocessing import set_start_method as _set_start_method try: _set_start_method(_start_method.lower()) _warnings.warn( f'multiprocessing start method is set to `{_start_method.lower()}`' ) except Exception as e: _warnings.warn( f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}' ) elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin': # DO SOME OS-WISE PATCHES # temporary fix for python 3.8 on macos where the default start is set to "spawn" # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods from multiprocessing import set_start_method as _set_start_method _set_start_method('fork') # do not change this line manually # this is managed by git tag and updated on every release # NOTE: this represents the NEXT release version __version__ = '3.14.1' # do not change this line 
manually # this is managed by proto/build-proto.sh and updated on every execution __proto_version__ = '0.1.13' try: __docarray_version__ = _docarray.__version__ except AttributeError as e: raise RuntimeError( '`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`' ) try: _signal.signal(_signal.SIGINT, _signal.default_int_handler) except Exception as exc: _warnings.warn(f'failed to set default signal handler: {exc!r}`') def _set_nofile(nofile_atleast=4096): """ Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256 temporary setting extinguishing with Python session. :param nofile_atleast: nofile soft limit :return: nofile soft limit and nofile hard limit """ try: import resource as res except ImportError: # Windows res = None if res is None: return (None,) * 2 soft, ohard = res.getrlimit(res.RLIMIT_NOFILE) hard = ohard if soft < nofile_atleast: soft = nofile_atleast if hard < soft: hard = soft try: res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except (ValueError, res.error): try: hard = soft print(f'trouble with max limit, retrying with soft,hard {soft},{hard}') res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except Exception: print('failed to set ulimit, giving up') soft, hard = res.getrlimit(res.RLIMIT_NOFILE) return soft, hard _set_nofile() # ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow # Document from jina._docarray import Document, DocumentArray # Client from jina.clients import Client # Deployment from jina.orchestrate.deployments import Deployment from jina.orchestrate.flow.asyncio import AsyncFlow # Flow from jina.orchestrate.flow.base import Flow # Executor from jina.serve.executors import BaseExecutor as Executor from jina.serve.executors.decorators import dynamic_batching, monitor, requests # Custom Gateway from jina.serve.gateway import 
BaseGateway as Gateway
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings as _warnings import docarray as _docarray if _sys.version_info < (3, 7, 0): raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}') def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs): return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % ( category.__name__, message, filename, lineno, ) _warnings.formatwarning = _warning_on_one_line _warnings.simplefilter('always', DeprecationWarning) # fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start _os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES' # JINA_MP_START_METHOD has higher priority than os-patch _start_method = _os.environ.get('JINA_MP_START_METHOD', None) if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}: from multiprocessing import set_start_method as _set_start_method try: _set_start_method(_start_method.lower()) _warnings.warn( f'multiprocessing start method is set to `{_start_method.lower()}`' ) except Exception as e: _warnings.warn( f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}' ) elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin': # DO SOME OS-WISE PATCHES # temporary fix for python 3.8 on macos where the default start is set to "spawn" # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods from multiprocessing import set_start_method as _set_start_method _set_start_method('fork') # do not change this line manually # this is managed by git tag and updated on every release # NOTE: this represents the NEXT release version __version__ = '3.14.0' # do not change this line 
manually # this is managed by proto/build-proto.sh and updated on every execution __proto_version__ = '0.1.13' try: __docarray_version__ = _docarray.__version__ except AttributeError as e: raise RuntimeError( '`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`' ) try: _signal.signal(_signal.SIGINT, _signal.default_int_handler) except Exception as exc: _warnings.warn(f'failed to set default signal handler: {exc!r}`') def _set_nofile(nofile_atleast=4096): """ Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256 temporary setting extinguishing with Python session. :param nofile_atleast: nofile soft limit :return: nofile soft limit and nofile hard limit """ try: import resource as res except ImportError: # Windows res = None if res is None: return (None,) * 2 soft, ohard = res.getrlimit(res.RLIMIT_NOFILE) hard = ohard if soft < nofile_atleast: soft = nofile_atleast if hard < soft: hard = soft try: res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except (ValueError, res.error): try: hard = soft print(f'trouble with max limit, retrying with soft,hard {soft},{hard}') res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except Exception: print('failed to set ulimit, giving up') soft, hard = res.getrlimit(res.RLIMIT_NOFILE) return soft, hard _set_nofile() # ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow # Document from jina._docarray import Document, DocumentArray # Client from jina.clients import Client # Deployment from jina.orchestrate.deployments import Deployment from jina.orchestrate.flow.asyncio import AsyncFlow # Flow from jina.orchestrate.flow.base import Flow # Executor from jina.serve.executors import BaseExecutor as Executor from jina.serve.executors.decorators import dynamic_batching, monitor, requests # Custom Gateway from jina.serve.gateway import 
BaseGateway as Gateway
from typing import List, Optional

from llama_index.core.base.embeddings.base import BaseEmbedding

try:
    import chonkie
    from chonkie import AutoEmbeddings
except ImportError:
    # Fix: the message previously misspelled the class as "Autembeddings".
    raise ImportError(
        "Could not import AutoEmbeddings from chonkie. "
        "Please install it with `pip install chonkie[all]`."
    )


class ChonkieAutoEmbedding(BaseEmbedding):
    """
    Autoembeddings from chonkie.

    Wraps :class:`chonkie.AutoEmbeddings`, which resolves ``model_name`` to a
    concrete chonkie embeddings backend at construction time.

    Args:
        model_name (str): The name of the model to use.
    """

    model_name: str
    # Backend resolved by AutoEmbeddings; None only before __init__ completes.
    embedder: Optional[chonkie.BaseEmbeddings] = None

    def __init__(self, model_name: str) -> None:
        super().__init__(model_name=model_name)
        self.embedder = AutoEmbeddings.get_embeddings(self.model_name)

    @classmethod
    def class_name(cls) -> str:
        """Return the class name used by llama-index serialization."""
        return "ChonkieAutoEmbedding"

    def _get_embedding(self, text: str) -> List[float]:
        """Embed a single string and return the vector as a plain list."""
        embed = self.embedder.embed(text)
        return embed.tolist()

    async def _aget_embedding(self, text: str) -> List[float]:
        """Async variant; the chonkie backend is sync, so delegate directly."""
        return self._get_embedding(text)

    def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of strings in one backend call."""
        embeds = self.embedder.embed_batch(texts)
        return [e.tolist() for e in embeds]

    async def _aget_embeddings(
        self,
        texts: List[str],
    ) -> List[List[float]]:
        """Async batch variant; delegates to the sync implementation."""
        return self._get_embeddings(texts)

    def _get_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return self._get_embedding(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return await self._aget_embedding(query)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Get text embedding."""
        return self._get_embedding(text)
from typing import List, Optional

from llama_index.core.base.embeddings.base import BaseEmbedding

try:
    import chonkie
    from chonkie import AutoEmbeddings
except ImportError:
    # Fix: the message previously misspelled the class as "Autembeddings".
    raise ImportError(
        "Could not import AutoEmbeddings from chonkie. "
        "Please install it with `pip install chonkie[all]`."
    )


class ChonkieAutoEmbedding(BaseEmbedding):
    """
    Autoembeddings from chonkie.

    Wraps :class:`chonkie.AutoEmbeddings`, which resolves ``model_name`` to a
    concrete chonkie embeddings backend at construction time.

    Args:
        model_name (str): The name of the model to use.
    """

    model_name: str
    # Backend resolved by AutoEmbeddings; None only before __init__ completes.
    embedder: Optional[chonkie.BaseEmbeddings] = None

    def __init__(self, model_name: str) -> None:
        super().__init__(model_name=model_name)
        self.embedder = AutoEmbeddings.get_embeddings(self.model_name)

    @classmethod
    def class_name(cls) -> str:
        """Return the class name used by llama-index serialization."""
        return "ChonkieAutoEmbedding"

    def _get_embedding(self, text: str) -> List[float]:
        """Embed a single string and return the vector as a plain list."""
        embed = self.embedder.embed(text)
        return embed.tolist()

    async def _aget_embedding(self, text: str) -> List[float]:
        """Async variant; the chonkie backend is sync, so delegate directly."""
        return self._get_embedding(text)

    def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of strings in one backend call."""
        embeds = self.embedder.embed_batch(texts)
        return [e.tolist() for e in embeds]

    async def _aget_embeddings(
        self,
        texts: List[str],
    ) -> List[List[float]]:
        """Async batch variant; delegates to the sync implementation."""
        return self._get_embeddings(texts)

    def _get_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return self._get_embedding(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return await self._aget_embedding(query)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Get text embedding."""
        return self._get_embedding(text)
"""Evaluate a SPLADE sparse encoder on Quora duplicate-pair binary classification."""
import logging

from datasets import load_dataset

from sentence_transformers.sparse_encoder import (
    MLMTransformer,
    SparseBinaryClassificationEvaluator,
    SparseEncoder,
    SpladePooling,
)

# Timestamped INFO logging so evaluator progress is visible on the console.
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)

# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
    modules=[
        MLMTransformer(model_name),
        SpladePooling(pooling_strategy="max"),  # You can also use 'sum'
    ],
    device="cuda:0",
)

# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
# Only the last 1000 training pairs are used, to keep the evaluation quick.
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")

# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
    sentences1=eval_dataset["sentence1"],
    sentences2=eval_dataset["sentence2"],
    labels=eval_dataset["label"],
    name="quora_duplicates_dev",
    show_progress_bar=True,
    similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)

# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
"""Evaluate a SPLADE sparse encoder on Quora duplicate-pair binary classification."""
from datasets import load_dataset

from sentence_transformers.sparse_encoder import (
    MLMTransformer,
    SparseBinaryClassificationEvaluator,
    SparseEncoder,
    SpladePooling,
)

# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
    modules=[
        MLMTransformer(model_name),
        SpladePooling(pooling_strategy="max"),  # You can also use 'sum'
    ],
    device="cuda:0",
)

# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
# Only the last 1000 training pairs are used, to keep the evaluation quick.
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")

# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
    sentences1=eval_dataset["sentence1"],
    sentences2=eval_dataset["sentence2"],
    labels=eval_dataset["label"],
    name="quora_duplicates_dev",
    show_progress_bar=True,
    similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)

# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# MaskFormer with a Swin-L backbone (MMDetection 3.x-style config: the base
# ResNet-50 backbone is replaced via `_delete_=True`).
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        patch_size=4,
        window_size=12,
        mlp_ratio=4,
        depths=depths,
        num_heads=[6, 12, 24, 48],
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        in_channels=[192, 384, 768, 1536],  # pass to pixel_decoder inside
        pixel_decoder=dict(
            _delete_=True,
            type='PixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU')),
        enforce_decoder_input_project=True))

# optimizer

# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
# Norm layers and positional/query embeddings are excluded from weight decay.
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'norm': norm_multi,
    'absolute_pos_embed': embed_multi,
    'relative_position_bias_table': embed_multi,
    'query_embed': embed_multi
}

optim_wrapper = dict(
    optimizer=dict(lr=6e-5, weight_decay=0.01),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))

max_epochs = 300

# learning rate
param_scheduler = [
    dict(
        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[250],
        gamma=0.1)
]

train_cfg = dict(max_epochs=max_epochs)
# MaskFormer with a Swin-L backbone (MMDetection 2.x-style config: the base
# ResNet-50 backbone is replaced via `_delete_=True`).
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        patch_size=4,
        window_size=12,
        mlp_ratio=4,
        depths=depths,
        num_heads=[6, 12, 24, 48],
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        in_channels=[192, 384, 768, 1536],  # pass to pixel_decoder inside
        pixel_decoder=dict(
            _delete_=True,
            type='PixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU')),
        enforce_decoder_input_project=True))

# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
# Norm layers and positional/query embeddings are excluded from weight decay.
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'norm': norm_multi,
    'absolute_pos_embed': embed_multi,
    'relative_position_bias_table': embed_multi,
    'query_embed': embed_multi
}

# optimizer
optimizer = dict(
    type='AdamW',
    lr=6e-5,
    weight_decay=0.01,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))

# learning policy
lr_config = dict(
    policy='step',
    gamma=0.1,
    by_epoch=True,
    step=[250],
    warmup='linear',
    warmup_by_epoch=False,
    warmup_ratio=1e-6,
    warmup_iters=1500)
runner = dict(type='EpochBasedRunner', max_epochs=300)
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from ..builder import NECKS


@NECKS.register_module()
class ChannelMapper(BaseModule):
    r"""Channel Mapper to reduce/increase channels of backbone features.

    This is used to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            would be extra_convs when num_outs larger than the length
            of in_channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 num_outs=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(ChannelMapper, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        # By default, produce one output per input scale.
        if num_outs is None:
            num_outs = len(in_channels)
        # One same-resolution conv per input scale, mapping to out_channels.
        self.convs = nn.ModuleList()
        for in_channel in in_channels:
            self.convs.append(
                ConvModule(
                    in_channel,
                    out_channels,
                    kernel_size,
                    padding=(kernel_size - 1) // 2,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # Extra stride-2 convs produce additional, coarser output levels when
        # num_outs exceeds the number of input scales.
        if num_outs > len(in_channels):
            self.extra_convs = nn.ModuleList()
            for i in range(len(in_channels), num_outs):
                if i == len(in_channels):
                    # The first extra conv reads the last (coarsest) backbone map.
                    in_channel = in_channels[-1]
                else:
                    # Subsequent extra convs chain off the previous extra output.
                    in_channel = out_channels
                self.extra_convs.append(
                    ConvModule(
                        in_channel,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
        if self.extra_convs:
            for i in range(len(self.extra_convs)):
                if i == 0:
                    # First extra level is computed from the raw last input map.
                    outs.append(self.extra_convs[0](inputs[-1]))
                else:
                    # Each further level downsamples the previously produced one.
                    outs.append(self.extra_convs[i](outs[-1]))
        return tuple(outs)
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from ..builder import NECKS


@NECKS.register_module()
class ChannelMapper(BaseModule):
    r"""Channel Mapper to reduce/increase channels of backbone features.

    Maps each backbone feature map to ``out_channels`` channels with one
    same-resolution conv per scale; optional stride-2 convs append extra,
    coarser output levels when ``num_outs`` exceeds the number of inputs.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            would be extra_convs when num_outs larger than the length
            of in_channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 num_outs=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(ChannelMapper, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if num_outs is None:
            # Default: one output per input scale.
            num_outs = len(in_channels)

        # Same-resolution mapping conv for each input scale.
        self.convs = nn.ModuleList(
            ConvModule(
                channels,
                out_channels,
                kernel_size,
                padding=(kernel_size - 1) // 2,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for channels in in_channels)

        if num_outs > len(in_channels):
            # Stride-2 convs generating the additional coarser levels: the
            # first one reads the last backbone map, the rest chain on the
            # previously generated extra level.
            extra = []
            for level in range(len(in_channels), num_outs):
                src_channels = (
                    in_channels[-1] if level == len(in_channels)
                    else out_channels)
                extra.append(
                    ConvModule(
                        src_channels,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))
            self.extra_convs = nn.ModuleList(extra)

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [conv(feat) for conv, feat in zip(self.convs, inputs)]
        if self.extra_convs:
            for idx, extra_conv in enumerate(self.extra_convs):
                # First extra level comes from the raw last input; each
                # subsequent level downsamples the previous output.
                source = inputs[-1] if idx == 0 else outs[-1]
                outs.append(extra_conv(source))
        return tuple(outs)
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, type='TOOD', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='TOODHead', num_classes=80, in_channels=256, stacked_convs=6, feat_channels=256, anchor_type='anchor_free', anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), initial_loss_cls=dict( type='FocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input gamma=2.0, alpha=0.25, loss_weight=1.0), loss_cls=dict( type='QualityFocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input beta=2.0, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), train_cfg=dict( initial_epoch=4, initial_assigner=dict(type='ATSSAssigner', topk=9), assigner=dict(type='TaskAlignedAssigner', topk=13), alpha=1, beta=6, allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, type='TOOD', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='TOODHead', num_classes=80, in_channels=256, stacked_convs=6, feat_channels=256, anchor_type='anchor_free', anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), initial_loss_cls=dict( type='FocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input gamma=2.0, alpha=0.25, loss_weight=1.0), loss_cls=dict( type='QualityFocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input beta=2.0, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), train_cfg=dict( initial_epoch=4, initial_assigner=dict(type='ATSSAssigner', topk=9), assigner=dict(type='TaskAlignedAssigner', topk=13), alpha=1, beta=6, allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# HTC with the auxiliary fused semantic segmentation branch enabled on top of
# the cascade base config (which runs without semantics).
_base_ = './htc-without-semantic_r50_fpn_1x_coco.py'
model = dict(
    data_preprocessor=dict(pad_seg=True),
    roi_head=dict(
        semantic_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            # Semantic features are pooled from the stride-8 level only.
            featmap_strides=[8]),
        semantic_head=dict(
            type='FusedSemanticHead',
            num_ins=5,
            fusion_level=1,
            seg_scale_factor=1 / 8,
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=183,
            loss_seg=dict(
                type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))

# Training pipeline additionally loads stuff-thing semantic maps (with_seg).
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(
    dataset=dict(
        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
        pipeline=train_pipeline))
# HTC with the auxiliary fused semantic segmentation branch enabled on top of
# the cascade base config (which runs without semantics).
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
model = dict(
    data_preprocessor=dict(pad_seg=True),
    roi_head=dict(
        semantic_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            # Semantic features are pooled from the stride-8 level only.
            featmap_strides=[8]),
        semantic_head=dict(
            type='FusedSemanticHead',
            num_ins=5,
            fusion_level=1,
            seg_scale_factor=1 / 8,
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=183,
            loss_seg=dict(
                type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))

# Training pipeline additionally loads stuff-thing semantic maps (with_seg).
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(
    dataset=dict(
        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
        pipeline=train_pipeline))
import json

import datasets

from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    torch_device,
)


class TestTrainerDistributedLoss(TestCasePlus):
    """Check that `average_tokens_across_devices` makes multi-GPU losses match single-GPU."""

    @require_torch_multi_accelerator
    def test_trainer(self):
        device_count = backend_device_count(torch_device)
        min_bs = 2
        output_dir = self.get_auto_remove_tmp_dir()
        # Three runs with identical global batch size:
        #   base   - single process (reference losses)
        #   broken - multi-process, token averaging disabled
        #   fixed  - multi-process, token averaging enabled
        for gpu_num, enable, bs, name in (
            (1, True, min_bs * device_count, "base"),
            (device_count, False, min_bs, "broken"),
            (device_count, True, min_bs, "fixed"),
        ):
            distributed_args = f"""--nproc_per_node={gpu_num}
                --master_port={get_torch_dist_unique_port()}
                {self.test_file_dir}/test_trainer_distributed_loss.py
            """.split()
            args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
            cmd = ["torchrun"] + distributed_args + args
            execute_subprocess_async(cmd, env=self.get_env())

        # Each subprocess run dumped its per-step losses as JSON; compare them.
        with open(f"{output_dir}/base_losses.json") as f:
            base_loss = json.load(f)
        with open(f"{output_dir}/broken_losses.json") as f:
            broken_loss = json.load(f)
        with open(f"{output_dir}/fixed_losses.json") as f:
            fixed_loss = json.load(f)

        broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
        fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]

        sum_base = sum(base_loss)
        sum_broken = sum(broken_loss)
        relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)

        # the gap may be smaller for other models, but it's still ok.
        self.assertGreater(max(broken_diff), 0.5)
        self.assertLess(max(fixed_diff), 0.005)
        self.assertLess(relative_broken, 0.1)


def run_distributed_training(training_args):
    """Entry point executed by each torchrun worker: short LM fine-tune, losses dumped to JSON."""
    set_seed(42)
    model_name = "nickypro/tinyllama-15M"
    dataset_name = "wikitext"
    dataset_config = "wikitext-2-raw-v1"
    dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:100]")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)

    tokenized_dataset = dataset.map(tokenize_function, batched=True)

    tokenizer.pad_token = tokenizer.eos_token
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    model = AutoModelForCausalLM.from_pretrained(model_name)

    loss_callback = StoreLossCallback()

    training_args.logging_steps = 1
    training_args.max_steps = 10
    training_args.learning_rate = 3e-4
    training_args.disable_tqdm = True
    training_args.dataloader_drop_last = True
    training_args.report_to = []
    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_dataset,
        callbacks=[loss_callback],
        data_collator=data_collator,
    )
    trainer.train()
    # Persist per-step losses so the parent test process can compare runs.
    with open(training_args.output_dir + "_losses.json", "w") as f:
        json.dump(loss_callback.losses, f)


if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
"""Regression test for distributed loss averaging in `Trainer`.

Runs the same tiny training job three ways via torchrun subprocesses:
  * "base"   — 1 process, batch size scaled up to match total tokens;
  * "broken" — N processes with `average_tokens_across_devices=False`;
  * "fixed"  — N processes with `average_tokens_across_devices=True`;
and asserts the fixed run reproduces the single-process loss curve while the
broken run does not.
"""

import json

import datasets

from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    torch_device,
)


class TestTrainerDistributedLoss(TestCasePlus):
    @require_torch_multi_accelerator
    def test_trainer(self):
        # Launch three torchrun jobs; each writes "<name>_losses.json" via
        # run_distributed_training() below (this same file is the torchrun target).
        device_count = backend_device_count(torch_device)
        min_bs = 1
        output_dir = self.get_auto_remove_tmp_dir()
        for gpu_num, enable, bs, name in (
            (1, True, min_bs * device_count, "base"),
            (device_count, False, min_bs, "broken"),
            (device_count, True, min_bs, "fixed"),
        ):
            distributed_args = f"""--nproc_per_node={gpu_num}
                --master_port={get_torch_dist_unique_port()}
                {self.test_file_dir}/test_trainer_distributed_loss.py
            """.split()
            args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
            cmd = ["torchrun"] + distributed_args + args
            execute_subprocess_async(cmd, env=self.get_env())
        with open(f"{output_dir}/base_losses.json") as f:
            base_loss = json.load(f)
        with open(f"{output_dir}/broken_losses.json") as f:
            broken_loss = json.load(f)
        with open(f"{output_dir}/fixed_losses.json") as f:
            fixed_loss = json.load(f)

        # Per-step absolute deviation from the single-process baseline.
        broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
        fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]
        # Relative gap between the two runs' TOTAL losses.
        # Fix: sum the broken run's losses, not the per-step differences —
        # `relative_broken` compares total loss between runs, so summing
        # `broken_diff` here would make the assertion below meaningless.
        sum_base = sum(base_loss)
        sum_broken = sum(broken_loss)
        relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)

        self.assertGreater(max(broken_diff), 0.5)
        self.assertLess(max(fixed_diff), 0.005)
        self.assertLess(relative_broken, 0.1)


def run_distributed_training(training_args):
    """Worker entry point executed by each torchrun process.

    Trains a tiny causal LM for 10 steps on a wikitext slice and dumps the
    per-step losses to ``<output_dir>_losses.json``.
    """
    set_seed(42)
    model_name = "nickypro/tinyllama-15M"
    dataset_name = "wikitext"
    dataset_config = "wikitext-2-raw-v1"
    dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:17]")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)

    tokenized_dataset = dataset.map(tokenize_function, batched=True)
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    loss_callback = StoreLossCallback()
    # Short, deterministic run: log every step, fixed 10 steps, no progress bars.
    training_args.logging_steps = 1
    training_args.max_steps = 10
    training_args.learning_rate = 3e-4
    training_args.disable_tqdm = True
    training_args.dataloader_drop_last = True
    training_args.report_to = []
    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_dataset,
        callbacks=[loss_callback],
        data_collator=data_collator,
    )
    trainer.train()
    with open(training_args.output_dir + "_losses.json", "w") as f:
        json.dump(loss_callback.losses, f)


if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
"""Slow single-file-checkpoint tests for the SDXL img2img pipeline.

Checks that loading from a single `.safetensors` checkpoint produces the same
images as loading the equivalent pretrained repo.
"""

import gc
import unittest

import torch

from diffusers import (
    DDIMScheduler,
    StableDiffusionXLImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)

from .single_file_testing_utils import SDXLSingleFileTesterMixin


enable_full_determinism()


@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
    # The mixin drives the actual comparison; these class attributes tell it
    # which checkpoint/repo/config pair to load.
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
    repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
    original_config = (
        "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
    )

    def setUp(self):
        super().setUp()
        # Free accelerator memory between tests.
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Return a deterministic set of pipeline kwargs used by the mixin."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_single_file_format_inference_is_same_as_pretrained(self):
        super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)


@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    ckpt_path = (
        "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors"
    )
    repo_id = "stabilityai/stable-diffusion-xl-refiner-1.0"
    original_config = (
        "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
    )

    def test_single_file_format_inference_is_same_as_pretrained(self):
        """Compare repo-loaded vs single-file-loaded refiner output on one image."""
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )

        pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.unet.set_default_attn_processor()
        pipe.enable_model_cpu_offload(device=torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = pipe(
            prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
        ).images[0]

        pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16)
        pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config)
        pipe_single_file.unet.set_default_attn_processor()
        pipe_single_file.enable_model_cpu_offload(device=torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_single_file = pipe_single_file(
            prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
        ).images[0]

        # Cosine-similarity distance is robust to tiny per-pixel fp16 noise.
        max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten())

        assert max_diff < 5e-4
"""Slow single-file-checkpoint tests for the SDXL img2img pipeline.

Checks that loading from a single `.safetensors` checkpoint produces the same
images as loading the equivalent pretrained repo.
"""

import gc
import unittest

import torch

from diffusers import (
    DDIMScheduler,
    StableDiffusionXLImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)

from .single_file_testing_utils import SDXLSingleFileTesterMixin


enable_full_determinism()


@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
    # The mixin drives the actual comparison; these class attributes tell it
    # which checkpoint/repo/config pair to load.
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
    repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
    original_config = (
        "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
    )

    def setUp(self):
        super().setUp()
        # Free accelerator memory between tests.
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Return a deterministic set of pipeline kwargs used by the mixin."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_single_file_format_inference_is_same_as_pretrained(self):
        super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)


@slow
@require_torch_accelerator
class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    ckpt_path = (
        "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors"
    )
    repo_id = "stabilityai/stable-diffusion-xl-refiner-1.0"
    original_config = (
        "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
    )

    def test_single_file_format_inference_is_same_as_pretrained(self):
        """Compare repo-loaded vs single-file-loaded refiner output on one image."""
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )

        pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.unet.set_default_attn_processor()
        # Fix: pass the detected accelerator explicitly — the default offload
        # device is CUDA, which breaks under @require_torch_accelerator on
        # non-CUDA backends (the rest of this file already uses torch_device).
        pipe.enable_model_cpu_offload(device=torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = pipe(
            prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
        ).images[0]

        pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16)
        pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config)
        pipe_single_file.unet.set_default_attn_processor()
        pipe_single_file.enable_model_cpu_offload(device=torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_single_file = pipe_single_file(
            prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
        ).images[0]

        # Cosine-similarity distance is robust to tiny per-pixel fp16 noise.
        max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten())

        assert max_diff < 5e-4
"""Train a TSDAE (denoising auto-encoder) sentence embedding model on AskUbuntu.

Downloads the AskUbuntu corpus, trains an SBERT model with the denoising
auto-encoder objective on all sentences not used for evaluation, and evaluates
re-ranking quality on the dev split.
"""

import gzip
import logging
import os
import sys
from datetime import datetime

from torch.utils.data import DataLoader

from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, evaluation, losses, models, util

#### Just some code to print debug information to stdout
logging.basicConfig(
    format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout

################# Download AskUbuntu and extract training corpus  #################
askubuntu_folder = "data/askubuntu"
result_folder = "output/askubuntu-tsdae-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
batch_size = 8

## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
    filepath = os.path.join(askubuntu_folder, filename)
    if not os.path.exists(filepath):
        util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)

# Read the corpus: maps question id -> question title.
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
    for line in fIn:
        splits = line.strip().split("\t")
        id = splits[0]
        title = splits[1]
        corpus[id] = title


# Read dev & test dataset
def read_eval_dataset(filepath):
    """Parse an AskUbuntu eval split into RerankingEvaluator samples.

    Each line is: query_id \t relevant_ids \t candidate_ids \t bm25_scores.
    Also records every seen id in `dev_test_ids` so those sentences are
    excluded from the training corpus below.
    """
    dataset = []
    with open(filepath) as fIn:
        for line in fIn:
            query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
            if len(relevant_id) == 0:  # Skip examples without relevant entries
                continue

            relevant_id = relevant_id.split(" ")
            candidate_ids = candidate_ids.split(" ")
            negative_ids = set(candidate_ids) - set(relevant_id)
            dataset.append(
                {
                    "query": corpus[query_id],
                    "positive": [corpus[pid] for pid in relevant_id],
                    "negative": [corpus[pid] for pid in negative_ids],
                }
            )
            dev_test_ids.add(query_id)
            dev_test_ids.update(candidate_ids)
    return dataset


dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))

## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
    if id not in dev_test_ids:
        train_sentences.append(sentence)

logging.info("{} train sentences".format(len(train_sentences)))

################# Initialize an SBERT model #################
model_name = sys.argv[1] if len(sys.argv) >= 2 else "bert-base-uncased"
word_embedding_model = models.Transformer(model_name)

# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])

################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)

# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")

logging.info("Dev performance before training")
dev_evaluator(model)

total_steps = 20000
logging.info("Start training")
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    evaluator=dev_evaluator,
    evaluation_steps=1000,
    epochs=1,
    steps_per_epoch=total_steps,
    weight_decay=0,
    scheduler="constantlr",
    optimizer_params={"lr": 3e-5},
    output_path=result_folder,
    show_progress_bar=True,
)
"""Train a TSDAE (denoising auto-encoder) sentence embedding model on AskUbuntu.

Downloads the AskUbuntu corpus, trains an SBERT model with the denoising
auto-encoder objective on all sentences not used for evaluation, and evaluates
re-ranking quality on the dev split.
"""

from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, util, datasets, evaluation, losses
import logging
import os
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys

#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout

################# Download AskUbuntu and extract training corpus  #################
askubuntu_folder = 'data/askubuntu'
result_folder = 'output/askubuntu-tsdae-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
batch_size = 8

## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ['text_tokenized.txt.gz', 'dev.txt', 'test.txt', 'train_random.txt']:
    filepath = os.path.join(askubuntu_folder, filename)
    if not os.path.exists(filepath):
        util.http_get('https://github.com/taolei87/askubuntu/raw/master/'+filename, filepath)

# Read the corpus: maps question id -> question title.
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, 'text_tokenized.txt.gz'), 'rt', encoding='utf8') as fIn:
    for line in fIn:
        splits = line.strip().split("\t")
        id = splits[0]
        title = splits[1]
        corpus[id] = title

# Read dev & test dataset
def read_eval_dataset(filepath):
    """Parse an AskUbuntu eval split into RerankingEvaluator samples.

    Each line is: query_id \t relevant_ids \t candidate_ids \t bm25_scores.
    Also records every seen id in `dev_test_ids` so those sentences are
    excluded from the training corpus below.
    """
    dataset = []
    with open(filepath) as fIn:
        for line in fIn:
            query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
            if len(relevant_id) == 0:   #Skip examples without relevant entries
                continue

            relevant_id = relevant_id.split(" ")
            candidate_ids = candidate_ids.split(" ")
            negative_ids = set(candidate_ids) - set(relevant_id)
            dataset.append({
                'query': corpus[query_id],
                'positive': [corpus[pid] for pid in relevant_id],
                'negative': [corpus[pid] for pid in negative_ids]
            })
            dev_test_ids.add(query_id)
            dev_test_ids.update(candidate_ids)
    return dataset

dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'dev.txt'))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'test.txt'))

## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
    if id not in dev_test_ids:
        train_sentences.append(sentence)

logging.info("{} train sentences".format(len(train_sentences)))

################# Initialize an SBERT model #################
model_name = sys.argv[1] if len(sys.argv) >= 2 else 'bert-base-uncased'
word_embedding_model = models.Transformer(model_name)

# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), 'cls')
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])

################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)

# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name='AskUbuntu dev')

logging.info("Dev performance before training")
dev_evaluator(model)

total_steps = 20000
logging.info("Start training")
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    evaluator=dev_evaluator,
    evaluation_steps=1000,
    epochs=1,
    steps_per_epoch=total_steps,
    weight_decay=0,
    scheduler='constantlr',
    optimizer_params={'lr': 3e-5},
    output_path=result_folder,
    show_progress_bar=True
)
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pytest from mmengine import Config, DefaultScope from mmengine.hub import get_config, get_model from mmengine.utils import get_installed_path, is_installed data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/') # mmdet has a more typical config structure, while mmpose has a complex # config structure @pytest.mark.skipif( not (is_installed('mmdet') and is_installed('mmpose')), reason='mmdet and mmpose should be installed') def test_get_config(): # Test load base config. base_cfg = get_config('mmdet::_base_/models/faster_rcnn_r50_fpn.py') package_path = get_installed_path('mmdet') test_base_cfg = Config.fromfile( osp.join(package_path, '.mim', 'configs/_base_/models/faster_rcnn_r50_fpn.py')) assert test_base_cfg._cfg_dict == base_cfg._cfg_dict # Test load faster_rcnn config cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py') test_cfg = Config.fromfile( osp.join(package_path, '.mim', 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')) assert cfg._cfg_dict == test_cfg._cfg_dict # Test pretrained cfg = get_config( 'mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True) assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301 # Test load mmpose get_config( 'mmpose::face/2d_kpt_sview_rgb_img/deeppose/wflw/res50_wflw_256x256' '.py') @pytest.mark.skipif( not is_installed('mmdet'), reason='mmdet and mmpose should be installed') def test_get_model(): # TODO compatible with downstream codebase. DefaultScope.get_instance('test_get_model', scope_name='test_scope') get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py') assert DefaultScope.get_current_instance().scope_name == 'test_scope' DefaultScope._instance_dict.pop('test_get_model')
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pytest from mmengine import Config, DefaultScope from mmengine.hub import get_config, get_model from mmengine.utils import get_installed_path, is_installed data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/') # mmdet has a more typical config structure, while mmpose has a complex # config structure @pytest.mark.skipif( not (is_installed('mmdet') and is_installed('mmpose')), reason='mmdet and mmpose should be installed') def test_get_config(): # Test load base config. base_cfg = get_config('mmdet::_base_/models/faster_rcnn_r50_fpn.py') package_path = get_installed_path('mmdet') test_base_cfg = Config.fromfile( osp.join(package_path, '.mim', 'configs/_base_/models/faster_rcnn_r50_fpn.py')) assert test_base_cfg._cfg_dict == base_cfg._cfg_dict # Test load faster_rcnn config cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py') test_cfg = Config.fromfile( osp.join(package_path, '.mim', 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')) assert cfg._cfg_dict == test_cfg._cfg_dict # Test pretrained cfg = get_config( 'mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True) assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301 # Test load mmpose get_config( 'mmpose::face/2d_kpt_sview_rgb_img/deeppose/wflw/res50_wflw_256x256' '.py') @pytest.mark.skipif( not is_installed('mmdet'), reason='mmdet and mmpose should be installed') def test_get_model(): # TODO compatible with downstream codebase. DefaultScope.get_instance('test_get_model', scope_name='test_scope') get_model('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py') assert DefaultScope.get_current_instance().scope_name == 'test_scope' DefaultScope._instance_dict.pop('test_get_model')
"""Example selectors.

**Example selector** implements logic for selecting examples to include them
in prompts.
This allows us to select examples that are most relevant to the input.
"""

from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.example_selectors.length_based import (
    LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
    MaxMarginalRelevanceExampleSelector,
    SemanticSimilarityExampleSelector,
    sorted_values,
)

# Public API of this package.
__all__ = [
    "BaseExampleSelector",
    "LengthBasedExampleSelector",
    "MaxMarginalRelevanceExampleSelector",
    "SemanticSimilarityExampleSelector",
    "sorted_values",
]
"""Example selectors.

**Example selector** implements logic for selecting examples to include them
in prompts.
This allows us to select examples that are most relevant to the input.
"""

from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.example_selectors.length_based import (
    LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
    MaxMarginalRelevanceExampleSelector,
    SemanticSimilarityExampleSelector,
    sorted_values,
)

# Public API of this package.
__all__ = [
    "BaseExampleSelector",
    "LengthBasedExampleSelector",
    "MaxMarginalRelevanceExampleSelector",
    "SemanticSimilarityExampleSelector",
    "sorted_values",
]
"""Backwards-compatibility shim: `UnstructuredPowerPointLoader` moved to
`langchain_community.document_loaders`; importing it from here emits a
deprecation warning via `create_importer`."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.document_loaders import UnstructuredPowerPointLoader

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "UnstructuredPowerPointLoader": "langchain_community.document_loaders",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "UnstructuredPowerPointLoader",
]
"""Backwards-compatibility shim: `UnstructuredPowerPointLoader` moved to
`langchain_community.document_loaders`; importing it from here emits a
deprecation warning via `create_importer`."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.document_loaders import UnstructuredPowerPointLoader

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "UnstructuredPowerPointLoader": "langchain_community.document_loaders"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "UnstructuredPowerPointLoader",
]
"""Tests for TransformerTFTextEncoder: config loading, batch encoding,
semantic ordering of embeddings, and traversal-path handling."""

__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

from pathlib import Path

import numpy as np
import pytest
from jina import Document, DocumentArray, Executor

from transformer_tf_text_encode import TransformerTFTextEncoder

# Embedding dimensionality of distilbert-base-uncased.
target_dim = 768


@pytest.fixture()
def docs_generator():
    return DocumentArray((Document(text='random text') for _ in range(30)))


def test_config():
    """config.yml should build an encoder with the expected default model."""
    ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'


def test_tf_batch(docs_generator):
    """All 30 root documents get an embedding of the expected shape."""
    encoder = TransformerTFTextEncoder()
    docs = docs_generator
    encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})

    assert len(docs.get_attributes('embedding')) == 30
    assert docs[0].embedding.shape == (target_dim,)


def test_encodes_semantic_meaning():
    """Semantically close sentences must be closer in embedding space."""
    sentences = dict()
    sentences['A'] = 'Hello, my name is Michael.'
    sentences['B'] = 'Today we are going to Disney World.'
    sentences['C'] = 'There are animals on the road'
    sentences['D'] = 'A dog is running down the road'

    encoder = TransformerTFTextEncoder()

    embeddings = {}
    for id_, sentence in sentences.items():
        docs = DocumentArray([Document(text=sentence)])
        encoder.encode(docs, parameters={})
        embeddings[id_] = docs[0].embedding

    def dist(a, b):
        # Euclidean distance between the two sentence embeddings.
        a_embedding = embeddings[a]
        b_embedding = embeddings[b]
        return np.linalg.norm(a_embedding - b_embedding)

    small_distance = dist('C', 'D')
    assert small_distance < dist('C', 'B')
    assert small_distance < dist('C', 'A')
    assert small_distance < dist('B', 'A')


@pytest.mark.parametrize(
    ['docs', 'docs_per_path', 'traversal_path'],
    [
        (
            pytest.lazy_fixture('docs_with_text'),
            [[['r'], 10], [['c'], 0], [['cc'], 0]],
            ['r'],
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_text"),
            [[['r'], 1], [['c'], 10], [['cc'], 0]],
            ['c'],
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_chunk_text"),
            [[['r'], 1], [['c'], 1], [['cc'], 10]],
            ['cc'],
        ),
    ],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
    """Only documents on the requested traversal path receive embeddings."""
    encoder = TransformerTFTextEncoder()
    encoder.encode(docs, parameters={'traversal_paths': traversal_path})
    for path, count in docs_per_path:
        assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
"""Tests for TransformerTFTextEncoder: config loading, batch encoding,
semantic ordering of embeddings, and traversal-path handling."""

__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

from pathlib import Path

import numpy as np
import pytest
from jina import Document, DocumentArray, Executor

from ...transformer_tf_text_encode import TransformerTFTextEncoder

# Embedding dimensionality of distilbert-base-uncased.
target_dim = 768


@pytest.fixture()
def docs_generator():
    return DocumentArray((Document(text='random text') for _ in range(30)))


def test_config():
    """config.yml should build an encoder with the expected default model."""
    ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'


def test_tf_batch(docs_generator):
    """All 30 root documents get an embedding of the expected shape."""
    encoder = TransformerTFTextEncoder()
    docs = docs_generator
    encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})

    assert len(docs.get_attributes('embedding')) == 30
    assert docs[0].embedding.shape == (target_dim,)


def test_encodes_semantic_meaning():
    """Semantically close sentences must be closer in embedding space."""
    sentences = dict()
    sentences['A'] = 'Hello, my name is Michael.'
    sentences['B'] = 'Today we are going to Disney World.'
    sentences['C'] = 'There are animals on the road'
    sentences['D'] = 'A dog is running down the road'

    encoder = TransformerTFTextEncoder()

    embeddings = {}
    for id_, sentence in sentences.items():
        docs = DocumentArray([Document(text=sentence)])
        encoder.encode(docs, parameters={})
        embeddings[id_] = docs[0].embedding

    def dist(a, b):
        # Euclidean distance between the two sentence embeddings.
        a_embedding = embeddings[a]
        b_embedding = embeddings[b]
        return np.linalg.norm(a_embedding - b_embedding)

    small_distance = dist('C', 'D')
    assert small_distance < dist('C', 'B')
    assert small_distance < dist('C', 'A')
    assert small_distance < dist('B', 'A')


@pytest.mark.parametrize(
    ['docs', 'docs_per_path', 'traversal_path'],
    [
        (
            pytest.lazy_fixture('docs_with_text'),
            [[['r'], 10], [['c'], 0], [['cc'], 0]],
            ['r'],
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_text"),
            [[['r'], 1], [['c'], 10], [['cc'], 0]],
            ['c'],
        ),
        (
            pytest.lazy_fixture("docs_with_chunk_chunk_text"),
            [[['r'], 1], [['c'], 1], [['cc'], 10]],
            ['cc'],
        ),
    ],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
    """Only documents on the requested traversal path receive embeddings."""
    encoder = TransformerTFTextEncoder()
    encoder.encode(docs, parameters={'traversal_paths': traversal_path})
    for path, count in docs_per_path:
        assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple

if TYPE_CHECKING:  # pragma: no cover
    from docarray import DocumentArray
    from docarray.typing import AnyDNN, T, ArrayType
    import numpy as np


class SingletonSugarMixin:
    """Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`"""

    # NOTE: each public method below has an @overload stub carrying the full
    # signature/docstring for IDEs, followed by the real implementation which
    # wraps `self` in a single-element DocumentArray and delegates to it.

    @overload
    def match(
        self: 'T',
        darray: 'DocumentArray',
        metric: Union[
            str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
        ] = 'cosine',
        limit: Optional[Union[int, float]] = 20,
        normalization: Optional[Tuple[float, float]] = None,
        metric_name: Optional[str] = None,
        batch_size: Optional[int] = None,
        exclude_self: bool = False,
        only_id: bool = False,
        use_scipy: bool = False,
        num_worker: Optional[int] = 1,
    ) -> 'T':
        """Matching the current Document against a set of Documents.

        The result will be stored in :attr:`.matches`.

        .. note::
            When you want to match a set Documents (let's call it set `A`) against another set of Documents (set `B`),
            where you want to find for each element in `A` what are its nearest neighbours in `B`.
            Then you need :meth:`DocumentArray.match`

        :param darray: the other DocumentArray to match against
        :param metric: the distance metric
        :param limit: the maximum number of matches, when not given defaults to 20.
        :param normalization: a tuple [a, b] to be used with min-max normalization,
            the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
            all values will be rescaled into range `[a, b]`.
        :param metric_name: if provided, then match result will be marked with this string.
        :param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
            elements. When `darray` is big, this can significantly speedup the computation.
        :param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
            considered as matches.
        :param only_id: if set, then returning matches will only contain ``id``
        :param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
            on sparse matrix.
        :param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.

                .. note::
                    This argument is only effective when ``batch_size`` is set.
        """
        ...

    def match(self: 'T', *args, **kwargs) -> 'T':
        # Delegate to DocumentArray.match; matches are stored on this Document
        # in place, so `self` (not the temporary array) is returned.
        from docarray import DocumentArray

        _tmp = DocumentArray(self)
        _tmp.match(*args, **kwargs)
        return self

    @overload
    def embed(
        self: 'T',
        embed_model: 'AnyDNN',
        device: str = 'cpu',
        batch_size: int = 256,
    ) -> 'T':
        """Fill the embedding of Documents inplace by using `embed_model`

        :param embed_model: the embedding model written in Keras/Pytorch/Paddle
        :param device: the computational device for `embed_model`, can be either
            `cpu` or `cuda`.
        :param batch_size: number of Documents in a batch for embedding
        """

    def embed(self: 'T', *args, **kwargs) -> 'T':
        # Delegate to DocumentArray.embed; the embedding is written onto this
        # Document in place.
        from docarray import DocumentArray

        _tmp = DocumentArray(self)
        _tmp.embed(*args, **kwargs)
        return self

    def post(self: 'T', *args, **kwargs) -> 'T':
        # Delegate to DocumentArray.post and unwrap the single-element result.
        from docarray import DocumentArray

        _tmp = DocumentArray(self)
        return _tmp.post(*args, **kwargs)[0]
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple if TYPE_CHECKING: from docarray import DocumentArray from docarray.typing import AnyDNN, T, ArrayType import numpy as np class SingletonSugarMixin: """Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`""" @overload def match( self: 'T', darray: 'DocumentArray', metric: Union[ str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray'] ] = 'cosine', limit: Optional[Union[int, float]] = 20, normalization: Optional[Tuple[float, float]] = None, metric_name: Optional[str] = None, batch_size: Optional[int] = None, exclude_self: bool = False, only_id: bool = False, use_scipy: bool = False, num_worker: Optional[int] = 1, ) -> 'T': """Matching the current Document against a set of Documents. The result will be stored in :attr:`.matches`. .. note:: When you want to match a set Documents (let's call it set `A`) against another set of Documents (set `B`), where you want to find for each element in `A` what are its nearest neighbours in `B`. Then you need :meth:`DocumentArray.match` :param darray: the other DocumentArray to match against :param metric: the distance metric :param limit: the maximum number of matches, when not given defaults to 20. :param normalization: a tuple [a, b] to be used with min-max normalization, the min distance will be rescaled to `a`, the max distance will be rescaled to `b` all values will be rescaled into range `[a, b]`. :param metric_name: if provided, then match result will be marked with this string. :param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size`` elements. When `darray` is big, this can significantly speedup the computation. :param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be considered as matches. 
:param only_id: if set, then returning matches will only contain ``id`` :param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance on sparse matrix. :param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used. .. note:: This argument is only effective when ``batch_size`` is set. """ ... def match(self: 'T', *args, **kwargs) -> 'T': from docarray import DocumentArray _tmp = DocumentArray(self) _tmp.match(*args, **kwargs) return self @overload def embed( self: 'T', embed_model: 'AnyDNN', device: str = 'cpu', batch_size: int = 256, ) -> 'T': """Fill the embedding of Documents inplace by using `embed_model` :param embed_model: the embedding model written in Keras/Pytorch/Paddle :param device: the computational device for `embed_model`, can be either `cpu` or `cuda`. :param batch_size: number of Documents in a batch for embedding """ def embed(self: 'T', *args, **kwargs) -> 'T': from docarray import DocumentArray _tmp = DocumentArray(self) _tmp.embed(*args, **kwargs) return self def post(self: 'T', *args, **kwargs) -> 'T': from docarray import DocumentArray _tmp = DocumentArray(self) return _tmp.post(*args, **kwargs)[0]
from jina.clients.base.http import HTTPBaseClient from jina.clients.mixin import ( AsyncHealthCheckMixin, AsyncMutateMixin, AsyncPostMixin, AsyncProfileMixin, HealthCheckMixin, MutateMixin, PostMixin, ProfileMixin, ) class HTTPClient( HTTPBaseClient, PostMixin, ProfileMixin, MutateMixin, HealthCheckMixin ): """A client connecting to a Gateway using gRPC protocol. Instantiate this class through the :meth:`jina.Client` convenience method. EXAMPLE USAGE .. code-block:: python from jina import Client from docarray import Document # select host address to connect to c = Client( protocol='http', asyncio=False, host='http://my.awesome.flow:1234' ) # returns HTTPClient instance c.post(on='/index', inputs=Document(text='hello!')) """ class AsyncHTTPClient( HTTPBaseClient, AsyncPostMixin, AsyncMutateMixin, AsyncProfileMixin, AsyncHealthCheckMixin, ): """ Asynchronous client connecting to a Gateway using HTTP protocol. Instantiate this class through the :meth:`jina.Client` convenience method. Unlike :class:`HTTPClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax), simply calling them will not schedule them to be executed. To actually run a coroutine, user need to put them in an event loop, e.g. via ``asyncio.run()``, ``asyncio.create_task()``. :class:`AsyncHTTPClient` can be very useful in the integration settings, where Jina/Flow/Client is NOT the main logic, but rather served as a part of other program. In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On contrary, :class:`Client` is controlling and wrapping the event loop internally, making the Client looks synchronous from outside. EXAMPLE USAGE .. 
code-block:: python from jina import Client from docarray import Document # async inputs for the client async def async_inputs(): for _ in range(10): yield Document() await asyncio.sleep(0.1) # select host address to connect to c = Client( protocol='http', asyncio=True, host='http://my.awesome.flow:1234' ) # returns AsyncHTTPClient instance async for resp in client.post(on='/index', async_inputs, request_size=1): print(resp) """
from jina.clients.base.http import HTTPBaseClient from jina.clients.mixin import ( AsyncHealthCheckMixin, AsyncMutateMixin, AsyncPostMixin, HealthCheckMixin, MutateMixin, PostMixin, ) class HTTPClient(HTTPBaseClient, PostMixin, MutateMixin, HealthCheckMixin): """A client connecting to a Gateway using gRPC protocol. Instantiate this class through the :meth:`jina.Client` convenience method. EXAMPLE USAGE .. code-block:: python from jina import Client from docarray import Document # select host address to connect to c = Client( protocol='http', asyncio=False, host='http://my.awesome.flow:1234' ) # returns HTTPClient instance c.post(on='/index', inputs=Document(text='hello!')) """ class AsyncHTTPClient( HTTPBaseClient, AsyncPostMixin, AsyncMutateMixin, AsyncHealthCheckMixin ): """ Asynchronous client connecting to a Gateway using HTTP protocol. Instantiate this class through the :meth:`jina.Client` convenience method. Unlike :class:`HTTPClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax), simply calling them will not schedule them to be executed. To actually run a coroutine, user need to put them in an event loop, e.g. via ``asyncio.run()``, ``asyncio.create_task()``. :class:`AsyncHTTPClient` can be very useful in the integration settings, where Jina/Flow/Client is NOT the main logic, but rather served as a part of other program. In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On contrary, :class:`Client` is controlling and wrapping the event loop internally, making the Client looks synchronous from outside. EXAMPLE USAGE .. 
code-block:: python from jina import Client from docarray import Document # async inputs for the client async def async_inputs(): for _ in range(10): yield Document() await asyncio.sleep(0.1) # select host address to connect to c = Client( protocol='http', asyncio=True, host='http://my.awesome.flow:1234' ) # returns AsyncHTTPClient instance async for resp in client.post(on='/index', async_inputs, request_size=1): print(resp) """
import os import warnings from modulefinder import Module import torch from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils from .extension import _HAS_OPS try: from .version import __version__ # noqa: F401 except ImportError: pass # Check if torchvision is being imported within the root folder if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join( os.path.realpath(os.getcwd()), "torchvision" ): message = ( "You are importing torchvision within its own root folder ({}). " "This is not expected to work and may give errors. Please exit the " "torchvision project source and relaunch your python interpreter." ) warnings.warn(message.format(os.getcwd())) _image_backend = "PIL" _video_backend = "pyav" def set_image_backend(backend): """ Specifies the package used to load images. Args: backend (string): Name of the image backend. one of {'PIL', 'accimage'}. The :mod:`accimage` package uses the Intel IPP library. It is generally faster than PIL, but does not support as many operations. """ global _image_backend if backend not in ["PIL", "accimage"]: raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'") _image_backend = backend def get_image_backend(): """ Gets the name of the package used to load images """ return _image_backend def set_video_backend(backend): """ Specifies the package used to decode videos. Args: backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic binding for the FFmpeg libraries. The :mod:`video_reader` package includes a native C++ implementation on top of FFMPEG libraries, and a python API of TorchScript custom operator. It generally decodes faster than :mod:`pyav`, but is perhaps less robust. .. note:: Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader' backend, please compile torchvision from source. 
""" global _video_backend if backend not in ["pyav", "video_reader", "cuda"]: raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend) if backend == "video_reader" and not io._HAS_VIDEO_OPT: # TODO: better messages message = "video_reader video backend is not available. Please compile torchvision from source and try again" raise RuntimeError(message) elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER: # TODO: better messages message = "cuda video backend is not available." raise RuntimeError(message) else: _video_backend = backend def get_video_backend(): """ Returns the currently active video backend used to decode videos. Returns: str: Name of the video backend. one of {'pyav', 'video_reader'}. """ return _video_backend def _is_tracing(): return torch._C._get_tracing_state()
import os import warnings from modulefinder import Module import torch from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils from .extension import _HAS_OPS try: from .version import __version__ # noqa: F401 except ImportError: pass # Check if torchvision is being imported within the root folder if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join( os.path.realpath(os.getcwd()), "torchvision" ): message = ( "You are importing torchvision within its own root folder ({}). " "This is not expected to work and may give errors. Please exit the " "torchvision project source and relaunch your python interpreter." ) warnings.warn(message.format(os.getcwd())) _image_backend = "PIL" _video_backend = "pyav" def set_image_backend(backend): """ Specifies the package used to load images. Args: backend (string): Name of the image backend. one of {'PIL', 'accimage'}. The :mod:`accimage` package uses the Intel IPP library. It is generally faster than PIL, but does not support as many operations. """ global _image_backend if backend not in ["PIL", "accimage"]: raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'") _image_backend = backend def get_image_backend(): """ Gets the name of the package used to load images """ return _image_backend def set_video_backend(backend): """ Specifies the package used to decode videos. Args: backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic binding for the FFmpeg libraries. The :mod:`video_reader` package includes a native C++ implementation on top of FFMPEG libraries, and a python API of TorchScript custom operator. It generally decodes faster than :mod:`pyav`, but is perhaps less robust. .. note:: Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader' backend, please compile torchvision from source. 
""" global _video_backend if backend not in ["pyav", "video_reader", "cuda"]: raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend) if backend == "video_reader" and not io._HAS_VIDEO_OPT: # TODO: better messages message = "video_reader video backend is not available. Please compile torchvision from source and try again" raise RuntimeError(message) elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER: # TODO: better messages message = "cuda video backend is not available." raise RuntimeError(message) else: _video_backend = backend def get_video_backend(): """ Returns the currently active video backend used to decode videos. Returns: str: Name of the video backend. one of {'pyav', 'video_reader'}. """ return _video_backend def _is_tracing(): return torch._C._get_tracing_state() _WARN_ABOUT_BETA_TRANSFORMS = True _BETA_TRANSFORMS_WARNING = ( "The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. " "While we do not expect major breaking changes, some APIs may still change " "according to user feedback. Please submit any feedback you may have in " "this issue: https://github.com/pytorch/vision/issues/6753, and you can also " "check out https://github.com/pytorch/vision/issues/7319 to learn more about " "the APIs that we suspect might involve future changes. " "You can silence this warning by calling torchvision.disable_beta_transforms_warning()." ) def disable_beta_transforms_warning(): global _WARN_ABOUT_BETA_TRANSFORMS _WARN_ABOUT_BETA_TRANSFORMS = False
from __future__ import annotations from sentence_transformers.training_args import SentenceTransformerTrainingArguments class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments): r""" CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of available arguments. Args: output_dir (`str`): The output directory where the model checkpoints will be written. prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*): The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted: 1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`. 2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`. 3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`. 4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to prompts. This should only be used if your training/evaluation/test datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`. batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*): The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options. Defaults to ``BatchSamplers.BATCH_SAMPLER``. 
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*): The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers` for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``. learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*): A mapping of parameter name regular expressions to learning rates. This allows you to set different learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to fine-tune specific parts of the model with different learning rates. """
from __future__ import annotations from sentence_transformers.training_args import SentenceTransformerTrainingArguments class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of available arguments. Args: output_dir (`str`): The output directory where the model checkpoints will be written. prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*): The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted: 1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`. 2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`. 3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`. 4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to prompts. This should only be used if your training/evaluation/test datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`. batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*): The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options. Defaults to ``BatchSamplers.BATCH_SAMPLER``. 
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*): The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers` for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``. """
import json from typing import Any, Type, TypeGuard, TypeVar, overload import jsonschema from fastapi.encoders import jsonable_encoder from pydantic import BaseModel from .type import type_match def to_dict(data) -> dict: if isinstance(data, BaseModel): data = data.model_dump() elif isinstance(data, str): data = loads(data) return jsonable_encoder(data) def dumps(data) -> str: return json.dumps(jsonable_encoder(data)) T = TypeVar("T") @overload def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ... @overload def loads(data: str, *args, **kwargs) -> Any: ... def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any: parsed = json.loads(data, *args, **kwargs) if target_type: return type_match(parsed, target_type) return parsed def validate_with_jsonschema( schema: dict[str, Any], data: dict[str, Any] ) -> str | None: """ Validate the data against the schema. Returns the validation error message if the data does not match the schema. """ try: jsonschema.validate(data, schema) return None except jsonschema.ValidationError as e: return str(e) def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]: return isinstance(value, list) and all( isinstance(item, BaseModel) for item in value ) def convert_pydantic_to_json(output_data: Any) -> Any: if isinstance(output_data, BaseModel): return output_data.model_dump() if is_list_of_basemodels(output_data): return [item.model_dump() for item in output_data] return output_data
import json from typing import Any, Type, TypeGuard, TypeVar, overload import jsonschema from fastapi.encoders import jsonable_encoder from pydantic import BaseModel from .type import type_match def to_dict(data) -> dict: return jsonable_encoder(data) def dumps(data) -> str: return json.dumps(jsonable_encoder(data)) T = TypeVar("T") @overload def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ... @overload def loads(data: str, *args, **kwargs) -> Any: ... def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any: parsed = json.loads(data, *args, **kwargs) if target_type: return type_match(parsed, target_type) return parsed def validate_with_jsonschema( schema: dict[str, Any], data: dict[str, Any] ) -> str | None: """ Validate the data against the schema. Returns the validation error message if the data does not match the schema. """ try: jsonschema.validate(data, schema) return None except jsonschema.ValidationError as e: return str(e) def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]: return isinstance(value, list) and all( isinstance(item, BaseModel) for item in value ) def convert_pydantic_to_json(output_data: Any) -> Any: if isinstance(output_data, BaseModel): return output_data.model_dump() if is_list_of_basemodels(output_data): return [item.model_dump() for item in output_data] return output_data
from io import BytesIO from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing.abstract_type import AbstractType from docarray.typing.proto_register import _register_proto from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray from docarray.utils._internal.misc import import_library if TYPE_CHECKING: from pydantic.fields import BaseConfig, ModelField from docarray.proto import NodeProto T = TypeVar('T', bound='VideoBytes') class VideoLoadResult(NamedTuple): video: VideoNdArray audio: AudioNdArray key_frame_indices: NdArray @_register_proto(proto_type_name='video_bytes') class VideoBytes(bytes, AbstractType): """ Bytes that store a video and that can be load into a video tensor """ @classmethod def validate( cls: Type[T], value: Any, field: 'ModelField', config: 'BaseConfig', ) -> T: value = bytes_validator(value) return cls(value) @classmethod def from_protobuf(cls: Type[T], pb_msg: T) -> T: return parse_obj_as(cls, pb_msg) def _to_node_protobuf(self: T) -> 'NodeProto': from docarray.proto import NodeProto return NodeProto(blob=self, type=self._proto_type_name) def load(self, **kwargs) -> VideoLoadResult: """ Load the video from the bytes into a VideoLoadResult object consisting of a VideoNdArray (`VideoLoadResult.video`), an AudioNdArray (`VideoLoadResult.audio`) and an NdArray containing the key frame indices (`VideoLoadResult.key_frame_indices`). 
--- ```python from docarray import BaseDoc from docarray.typing import VideoUrl import numpy as np class MyDoc(BaseDoc): video_url: VideoUrl doc = MyDoc( video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' ) video, audio, key_frame_indices = doc.video_url.load() assert isinstance(video, np.ndarray) assert isinstance(audio, np.ndarray) assert isinstance(key_frame_indices, np.ndarray) ``` --- :param kwargs: supports all keyword arguments that are being supported by av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open) :return: a VideoLoadResult instance with video, audio and keyframe indices """ if TYPE_CHECKING: import av else: av = import_library('av') with av.open(BytesIO(self), **kwargs) as container: audio_frames = [] video_frames = [] keyframe_indices = [] for frame in container.decode(): if type(frame) == av.audio.frame.AudioFrame: audio_frames.append(frame.to_ndarray()) elif type(frame) == av.video.frame.VideoFrame: video_frames.append(frame.to_ndarray(format='rgb24')) if frame.key_frame == 1: curr_index = len(video_frames) keyframe_indices.append(curr_index) if len(audio_frames) == 0: audio = parse_obj_as(AudioNdArray, np.array(audio_frames)) else: audio = parse_obj_as(AudioNdArray, np.stack(audio_frames)) video = parse_obj_as(VideoNdArray, np.stack(video_frames)) indices = parse_obj_as(NdArray, keyframe_indices) return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
from io import BytesIO from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing.abstract_type import AbstractType from docarray.typing.proto_register import _register_proto from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray if TYPE_CHECKING: from pydantic.fields import BaseConfig, ModelField from docarray.proto import NodeProto T = TypeVar('T', bound='VideoBytes') class VideoLoadResult(NamedTuple): video: VideoNdArray audio: AudioNdArray key_frame_indices: NdArray @_register_proto(proto_type_name='video_bytes') class VideoBytes(bytes, AbstractType): """ Bytes that store a video and that can be load into a video tensor """ @classmethod def validate( cls: Type[T], value: Any, field: 'ModelField', config: 'BaseConfig', ) -> T: value = bytes_validator(value) return cls(value) @classmethod def from_protobuf(cls: Type[T], pb_msg: T) -> T: return parse_obj_as(cls, pb_msg) def _to_node_protobuf(self: T) -> 'NodeProto': from docarray.proto import NodeProto return NodeProto(blob=self, type=self._proto_type_name) def load(self, **kwargs) -> VideoLoadResult: """ Load the video from the bytes into a VideoLoadResult object consisting of a VideoNdArray (`VideoLoadResult.video`), an AudioNdArray (`VideoLoadResult.audio`) and an NdArray containing the key frame indices (`VideoLoadResult.key_frame_indices`). 
--- ```python from docarray import BaseDoc from docarray.typing import VideoUrl import numpy as np class MyDoc(BaseDoc): video_url: VideoUrl doc = MyDoc( video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' ) video, audio, key_frame_indices = doc.video_url.load() assert isinstance(video, np.ndarray) assert isinstance(audio, np.ndarray) assert isinstance(key_frame_indices, np.ndarray) ``` --- :param kwargs: supports all keyword arguments that are being supported by av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open) :return: a VideoLoadResult instance with video, audio and keyframe indices """ import av with av.open(BytesIO(self), **kwargs) as container: audio_frames = [] video_frames = [] keyframe_indices = [] for frame in container.decode(): if type(frame) == av.audio.frame.AudioFrame: audio_frames.append(frame.to_ndarray()) elif type(frame) == av.video.frame.VideoFrame: video_frames.append(frame.to_ndarray(format='rgb24')) if frame.key_frame == 1: curr_index = len(video_frames) keyframe_indices.append(curr_index) if len(audio_frames) == 0: audio = parse_obj_as(AudioNdArray, np.array(audio_frames)) else: audio = parse_obj_as(AudioNdArray, np.stack(audio_frames)) video = parse_obj_as(VideoNdArray, np.stack(video_frames)) indices = parse_obj_as(NdArray, keyframe_indices) return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
import platform from argparse import ArgumentParser import fsspec import huggingface_hub import pandas import pyarrow from datasets import __version__ as version from datasets.commands import BaseDatasetsCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env", help="Print relevant system environment info.") download_parser.set_defaults(func=info_command_factory) def run(self): info = { "`datasets` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "`huggingface_hub` version": huggingface_hub.__version__, "PyArrow version": pyarrow.__version__, "Pandas version": pandas.__version__, "`fsspec` version": fsspec.__version__, } print("\nCopy-and-paste the text below in your GitHub issue.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import platform
from argparse import ArgumentParser

import huggingface_hub
import pandas
import pyarrow

from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand


def info_command_factory(_):
    """Return the handler for the `env` subcommand; the parsed args are unused."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDatasetsCLICommand):
    """CLI command printing the environment details users paste into issues."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `env` subcommand on the given root parser."""
        sub_parser = parser.add_parser("env", help="Print relevant system environment info.")
        sub_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Gather environment info, print it, and return the mapping."""
        pairs = [
            ("`datasets` version", version),
            ("Platform", platform.platform()),
            ("Python version", platform.python_version()),
            ("Huggingface_hub version", huggingface_hub.__version__),
            ("PyArrow version", pyarrow.__version__),
            ("Pandas version", pandas.__version__),
        ]
        info = dict(pairs)

        print("\nCopy-and-paste the text below in your GitHub issue.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Format a mapping as `- key: value` bullet lines with a trailing newline."""
        body = "\n".join(f"- {prop}: {val}" for prop, val in d.items())
        return body + "\n"
"""Azure Speech tool spec.""" import time from typing import List, Optional from llama_index.core.tools.tool_spec.base import BaseToolSpec class AzureSpeechToolSpec(BaseToolSpec): """Azure Speech tool spec.""" spec_functions = ["speech_to_text", "text_to_speech"] def __init__( self, region: str, speech_key: str, language: Optional[str] = "en-US" ) -> None: import azure.cognitiveservices.speech as speechsdk """Initialize with parameters.""" self.config = speechsdk.SpeechConfig(subscription=speech_key, region=region) self.config.speech_recognition_language = language def text_to_speech(self, text: str) -> None: """ This tool accepts a natural language string and will use Azure speech services to create an audio version of the text, and play it on the users computer. Args: text (str): The text to play """ import azure.cognitiveservices.speech as speechsdk speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.config) result = speech_synthesizer.speak_text(text) if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted: speechsdk.AudioDataStream(result) return "Audio playback complete." 
elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details print(f"Speech synthesis canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: print(f"Error details: {cancellation_details.error_details}") return None return None return None def _transcribe(self, speech_recognizer) -> List[str]: done = False results = [] def stop_cb(evt) -> None: """Callback that stop continuous recognition.""" speech_recognizer.stop_continuous_recognition_async() nonlocal done done = True speech_recognizer.recognized.connect( lambda evt, results=results: results.append(evt.result.text) ) speech_recognizer.session_stopped.connect(stop_cb) speech_recognizer.canceled.connect(stop_cb) # Start continuous speech recognition speech_recognizer.start_continuous_recognition_async() while not done: time.sleep(0.5) return results def speech_to_text(self, filename: str) -> List[str]: """ This tool accepts a filename for a speech audio file and uses Azure to transcribe it into text. Args: filename (str): The name of the file to transcribe """ import azure.cognitiveservices.speech as speechsdk speech_recognizer = speechsdk.SpeechRecognizer( speech_config=self.config, audio_config=speechsdk.audio.AudioConfig(filename=filename), ) return self._transcribe(speech_recognizer)
"""Azure Speech tool spec.""" import time from typing import List, Optional from llama_index.core.tools.tool_spec.base import BaseToolSpec class AzureSpeechToolSpec(BaseToolSpec): """Azure Speech tool spec.""" spec_functions = ["speech_to_text", "text_to_speech"] def __init__( self, region: str, speech_key: str, language: Optional[str] = "en-US" ) -> None: import azure.cognitiveservices.speech as speechsdk """Initialize with parameters.""" self.config = speechsdk.SpeechConfig(subscription=speech_key, region=region) self.config.speech_recognition_language = language def text_to_speech(self, text: str) -> None: """ This tool accepts a natural language string and will use Azure speech services to create an audio version of the text, and play it on the users computer. Args: text (str): The text to play """ import azure.cognitiveservices.speech as speechsdk speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.config) result = speech_synthesizer.speak_text(text) if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted: speechsdk.AudioDataStream(result) return "Audio playback complete." 
elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details print(f"Speech synthesis canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: print(f"Error details: {cancellation_details.error_details}") return None return None return None def _transcribe(self, speech_recognizer) -> List[str]: done = False results = [] def stop_cb(evt) -> None: """Callback that stop continuous recognition.""" speech_recognizer.stop_continuous_recognition_async() nonlocal done done = True speech_recognizer.recognized.connect( lambda evt, results=results: results.append(evt.result.text) ) speech_recognizer.session_stopped.connect(stop_cb) speech_recognizer.canceled.connect(stop_cb) # Start continuous speech recognition speech_recognizer.start_continuous_recognition_async() while not done: time.sleep(0.5) return results def speech_to_text(self, filename: str) -> List[str]: """ This tool accepts a filename for a speech audio file and uses Azure to transcribe it into text. Args: filename (str): The name of the file to transcribe """ import azure.cognitiveservices.speech as speechsdk speech_recognizer = speechsdk.SpeechRecognizer( speech_config=self.config, audio_config=speechsdk.audio.AudioConfig(filename=filename), ) return self._transcribe(speech_recognizer)
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, Type, TypeVar

from docarray.typing.abstract_type import AbstractType

# Global registry mapping a proto type-name key to the class that handles it.
_PROTO_TYPE_NAME_TO_CLASS: Dict[str, Type[AbstractType]] = {}

T = TypeVar('T', bound='AbstractType')


def _register_proto(
    proto_type_name: str,
) -> Callable[[Type[T]], Type[T]]:
    """Register a new type to be used in the protobuf serialization.

    This will add the type key to the global registry of types key used in the
    proto serialization and deserialization. This is for internal usage only.

    ---

    ```python
    from docarray.typing.proto_register import register_proto
    from docarray.typing.abstract_type import AbstractType


    @register_proto(proto_type_name='my_type')
    class MyType(AbstractType):
        ...
    ```

    ---

    :param proto_type_name: the unique key under which the class is registered
    :return: a class decorator that registers the class and returns it unchanged
    :raises ValueError: if ``proto_type_name`` is already registered
    """
    # Idiomatic membership test on the dict itself (not `.keys()`), ruff SIM118.
    if proto_type_name in _PROTO_TYPE_NAME_TO_CLASS:
        raise ValueError(
            f'the key {proto_type_name} is already registered in the global registry'
        )

    def _register(cls: Type[T]) -> Type[T]:
        # Stamp the key on the class and record it in the global registry.
        cls._proto_type_name = proto_type_name
        _PROTO_TYPE_NAME_TO_CLASS[proto_type_name] = cls
        return cls

    return _register
from typing import Callable, Dict, Type, TypeVar

from docarray.typing.abstract_type import AbstractType

# Global registry mapping a proto type-name key to the class that handles it.
_PROTO_TYPE_NAME_TO_CLASS: Dict[str, Type[AbstractType]] = {}

T = TypeVar('T', bound='AbstractType')


def _register_proto(
    proto_type_name: str,
) -> Callable[[Type[T]], Type[T]]:
    """Register a new type to be used in the protobuf serialization.

    This will add the type key to the global registry of types key used in the
    proto serialization and deserialization. This is for internal usage only.

    ---

    ```python
    from docarray.typing.proto_register import register_proto
    from docarray.typing.abstract_type import AbstractType


    @register_proto(proto_type_name='my_type')
    class MyType(AbstractType):
        ...
    ```

    ---

    :param proto_type_name: the unique key under which the class is registered
    :return: a class decorator that registers the class and returns it unchanged
    :raises ValueError: if ``proto_type_name`` is already registered
    """
    # Idiomatic membership test on the dict itself (not `.keys()`), ruff SIM118.
    if proto_type_name in _PROTO_TYPE_NAME_TO_CLASS:
        raise ValueError(
            f'the key {proto_type_name} is already registered in the global registry'
        )

    def _register(cls: Type[T]) -> Type[T]:
        # Stamp the key on the class and record it in the global registry.
        cls._proto_type_name = proto_type_name
        _PROTO_TYPE_NAME_TO_CLASS[proto_type_name] = cls
        return cls

    return _register
import os
import socket

from jina import DocumentArray, Executor, requests


class TestExecutor(Executor):
    """Executor used in integration tests: each endpoint stamps runtime
    information (traversal path, replica/shard ids, env vars, k8s resources,
    workspace) into the tags of the documents it receives.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)
        # Runtime name assigned by the orchestrator; recorded in tags below.
        self._name = self.runtime_args.name

    @requests(on='/debug')
    def debug(self, docs: DocumentArray, **kwargs):
        """Append this executor's name to each doc's traversal list and record
        replica/shard/host info in its tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        key = 'traversed-executors'

        for doc in docs:
            if key not in doc.tags:
                doc.tags[key] = []
            # Re-assign a fresh list so the tag update is registered.
            traversed = list(doc.tags.get(key))
            traversed.append(self._name)
            doc.tags[key] = traversed
            doc.tags['parallel'] = self.runtime_args.replicas
            doc.tags['shards'] = self.runtime_args.shards
            doc.tags['shard_id'] = self.runtime_args.shard_id
            doc.tags['hostname'] = socket.gethostname()

    @requests(on='/env')
    def env(self, docs: DocumentArray, **kwargs):
        """Copy selected environment variables into each doc's tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        for doc in docs:
            doc.tags['k1'] = os.environ.get('k1')
            doc.tags['k2'] = os.environ.get('k2')
            doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
            doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
            doc.tags['SECRET_USERNAME'] = os.environ.get('SECRET_USERNAME')
            doc.tags['SECRET_PASSWORD'] = os.environ.get('SECRET_PASSWORD')

    @requests(on='/cuda')
    def cuda(self, docs: DocumentArray, **kwargs):
        """Report the resource limits of the first pod in the ``test-gpu``
        namespace via the Kubernetes API."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        import kubernetes
        from kubernetes import client

        api_client = client.ApiClient()
        core_client = client.CoreV1Api(api_client=api_client)
        try:
            # try loading kube config from disk first
            kubernetes.config.load_kube_config()
        except kubernetes.config.config_exception.ConfigException:
            # if the config could not be read from disk, try loading in cluster config
            # this works if we are running inside k8s
            kubernetes.config.load_incluster_config()
        # NOTE(review): list_namespaced_pod returns a V1PodList; indexing it
        # directly (pods[0]) rather than pods.items[0] looks wrong — confirm.
        pods = core_client.list_namespaced_pod('test-gpu')  # List[V1Pod]
        pod_spec = pods[0].spec  # V1PodSpec
        pod_container = pod_spec.containers[0]  # V1Container
        pod_resources = pod_container.resources  # V1ResourceRequirements

        for doc in docs:
            # NOTE(review): assumes doc.tags already contains a 'resources'
            # mapping; a doc without it would fail here — confirm callers.
            doc.tags['resources']['limits'] = pod_resources.limits

    @requests(on='/workspace')
    def foo_workspace(self, docs: DocumentArray, **kwargs):
        """Write this executor's workspace path into each doc's tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        self.logger.debug(f'Workspace {self.workspace}.')
        for doc in docs:
            doc.tags['workspace'] = self.workspace
import os
from jina import Executor, requests, DocumentArray
import socket


class TestExecutor(Executor):
    """Executor used in integration tests: each endpoint stamps runtime
    information (traversal path, replica/shard ids, env vars, k8s resources,
    workspace) into the tags of the documents it receives.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from jina.logging.logger import JinaLogger

        self.logger = JinaLogger(self.__class__.__name__)
        # Runtime name assigned by the orchestrator; recorded in tags below.
        self._name = self.runtime_args.name

    @requests(on='/debug')
    def debug(self, docs: DocumentArray, **kwargs):
        """Append this executor's name to each doc's traversal list and record
        replica/shard/host info in its tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        key = 'traversed-executors'

        for doc in docs:
            if key not in doc.tags:
                doc.tags[key] = []
            # Re-assign a fresh list so the tag update is registered.
            traversed = list(doc.tags.get(key))
            traversed.append(self._name)
            doc.tags[key] = traversed
            doc.tags['parallel'] = self.runtime_args.replicas
            doc.tags['shards'] = self.runtime_args.shards
            doc.tags['shard_id'] = self.runtime_args.shard_id
            doc.tags['hostname'] = socket.gethostname()

    @requests(on='/env')
    def env(self, docs: DocumentArray, **kwargs):
        """Copy selected environment variables into each doc's tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        for doc in docs:
            doc.tags['k1'] = os.environ.get('k1')
            doc.tags['k2'] = os.environ.get('k2')
            doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
            doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
            doc.tags['SECRET_USERNAME'] = os.environ.get('SECRET_USERNAME')
            doc.tags['SECRET_PASSWORD'] = os.environ.get('SECRET_PASSWORD')

    @requests(on='/cuda')
    def cuda(self, docs: DocumentArray, **kwargs):
        """Report the resource limits of the first pod in the ``test-gpu``
        namespace via the Kubernetes API."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        import kubernetes
        from kubernetes import client

        api_client = client.ApiClient()
        core_client = client.CoreV1Api(api_client=api_client)
        try:
            # try loading kube config from disk first
            kubernetes.config.load_kube_config()
        except kubernetes.config.config_exception.ConfigException:
            # if the config could not be read from disk, try loading in cluster config
            # this works if we are running inside k8s
            kubernetes.config.load_incluster_config()
        # NOTE(review): list_namespaced_pod returns a V1PodList; indexing it
        # directly (pods[0]) rather than pods.items[0] looks wrong — confirm.
        pods = core_client.list_namespaced_pod('test-gpu')  # List[V1Pod]
        pod_spec = pods[0].spec  # V1PodSpec
        pod_container = pod_spec.containers[0]  # V1Container
        pod_resources = pod_container.resources  # V1ResourceRequirements

        for doc in docs:
            # NOTE(review): assumes doc.tags already contains a 'resources'
            # mapping; a doc without it would fail here — confirm callers.
            doc.tags['resources']['limits'] = pod_resources.limits

    @requests(on='/workspace')
    def foo_workspace(self, docs: DocumentArray, **kwargs):
        """Write this executor's workspace path into each doc's tags."""
        self.logger.debug(
            f'Received doc array in test-executor {self._name} with length {len(docs)}.'
        )
        self.logger.debug(f'Workspace {self.workspace}.')
        for doc in docs:
            doc.tags['workspace'] = self.workspace
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Optional, Type

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator

from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
    aget_current_page,
    get_current_page,
)

if TYPE_CHECKING:
    # No type-only imports are currently needed; guard kept as a placeholder.
    pass


class ExtractHyperlinksToolInput(BaseModel):
    """Input for ExtractHyperlinksTool."""

    # When True, hrefs are resolved against the page URL before returning.
    absolute_urls: bool = Field(
        default=False,
        description="Return absolute URLs instead of relative URLs",
    )


class ExtractHyperlinksTool(BaseBrowserTool):
    """Extract all hyperlinks on the page."""

    name: str = "extract_hyperlinks"
    description: str = "Extract all hyperlinks on the current webpage"
    args_schema: Type[BaseModel] = ExtractHyperlinksToolInput

    @model_validator(mode="before")
    @classmethod
    def check_bs_import(cls, values: dict) -> Any:
        """Check that the arguments are valid."""
        # Fail fast with an actionable message if beautifulsoup4 is missing.
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError:
            raise ImportError(
                "The 'beautifulsoup4' package is required to use this tool."
                " Please install it with 'pip install beautifulsoup4'."
            )
        return values

    @staticmethod
    def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str:
        """Parse ``html_content`` and return all anchor hrefs as a JSON list.

        Args:
            page: The browser page; only its ``url`` is read, and only when
                ``absolute_urls`` is True.
            html_content: Raw HTML of the page.
            absolute_urls: Resolve hrefs against the page URL when True.
        """
        from urllib.parse import urljoin

        from bs4 import BeautifulSoup

        # Parse the HTML content with BeautifulSoup
        soup = BeautifulSoup(html_content, "lxml")

        # Find all the anchor elements and extract their href attributes
        anchors = soup.find_all("a")
        if absolute_urls:
            base_url = page.url
            links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors]
        else:
            links = [anchor.get("href", "") for anchor in anchors]

        # Return the list of links as a JSON string. Duplicated link
        # only appears once in the list (the set also makes the order of
        # the returned links unspecified).
        return json.dumps(list(set(links)))

    def _run(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        page = get_current_page(self.sync_browser)
        html_content = page.content()
        return self.scrape_page(page, html_content, absolute_urls)

    async def _arun(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        page = await aget_current_page(self.async_browser)
        html_content = await page.content()
        return self.scrape_page(page, html_content, absolute_urls)
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Optional, Type

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator

from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
    aget_current_page,
    get_current_page,
)

if TYPE_CHECKING:
    # No type-only imports are currently needed; guard kept as a placeholder.
    pass


class ExtractHyperlinksToolInput(BaseModel):
    """Input for ExtractHyperlinksTool."""

    # When True, hrefs are resolved against the page URL before returning.
    absolute_urls: bool = Field(
        default=False,
        description="Return absolute URLs instead of relative URLs",
    )


class ExtractHyperlinksTool(BaseBrowserTool):  # type: ignore[override, override]
    """Extract all hyperlinks on the page."""

    name: str = "extract_hyperlinks"
    description: str = "Extract all hyperlinks on the current webpage"
    args_schema: Type[BaseModel] = ExtractHyperlinksToolInput

    @model_validator(mode="before")
    @classmethod
    def check_bs_import(cls, values: dict) -> Any:
        """Check that the arguments are valid."""
        # Fail fast with an actionable message if beautifulsoup4 is missing.
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError:
            raise ImportError(
                "The 'beautifulsoup4' package is required to use this tool."
                " Please install it with 'pip install beautifulsoup4'."
            )
        return values

    @staticmethod
    def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str:
        """Parse ``html_content`` and return all anchor hrefs as a JSON list.

        Args:
            page: The browser page; only its ``url`` is read, and only when
                ``absolute_urls`` is True.
            html_content: Raw HTML of the page.
            absolute_urls: Resolve hrefs against the page URL when True.
        """
        from urllib.parse import urljoin

        from bs4 import BeautifulSoup

        # Parse the HTML content with BeautifulSoup
        soup = BeautifulSoup(html_content, "lxml")

        # Find all the anchor elements and extract their href attributes
        anchors = soup.find_all("a")
        if absolute_urls:
            base_url = page.url
            links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors]
        else:
            links = [anchor.get("href", "") for anchor in anchors]

        # Return the list of links as a JSON string. Duplicated link
        # only appears once in the list (the set also makes the order of
        # the returned links unspecified).
        return json.dumps(list(set(links)))

    def _run(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        page = get_current_page(self.sync_browser)
        html_content = page.content()
        return self.scrape_page(page, html_content, absolute_urls)

    async def _arun(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        page = await aget_current_page(self.async_browser)
        html_content = await page.content()
        return self.scrape_page(page, html_content, absolute_urls)