input (stringlengths 33–5k) | output (stringlengths 32–5k) |
---|---|
import os
from parameterized import parameterized
from torchaudio.datasets import LibriMix
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 8000
_TASKS_TO_MIXTURE = {
"sep_clean": "mix_clean",
"enh_single": "mix_single",
"enh_both": "mix_both",
"sep_noisy": "mix_both",
}
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=_SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, _SAMPLE_RATE)
return wav
def get_mock_dataset(root_dir: str, num_speaker: int):
"""
root_dir: directory to the mocked dataset
"""
mocked_data = []
seed = 0
base_dir = os.path.join(root_dir, f"Libri{num_speaker}Mix", "wav8k", "min", "train-360")
os.makedirs(base_dir, exist_ok=True)
for utterance_id in range(10):
filename = f"{utterance_id}.wav"
task_outputs = {}
for task in _TASKS_TO_MIXTURE:
# Create the mixture folder. The folder name depends on the task.
mixture_folder = _TASKS_TO_MIXTURE[task]
mixture_dir = os.path.join(base_dir, mixture_folder)
os.makedirs(mixture_dir, exist_ok=True)
mixture_path = os.path.join(mixture_dir, filename)
mixture = _save_wav(mixture_path, seed)
sources = []
if task == "enh_both":
sources = [task_outputs["sep_clean"][1]]
else:
for speaker_id in range(num_speaker):
source_dir = os.path.join(base_dir, f"s{speaker_id+1}")
os.makedirs(source_dir, exist_ok=True)
source_path = os.path.join(source_dir, filename)
if os.path.exists(source_path):
sources = task_outputs["sep_clean"][2]
break
else:
source = _save_wav(source_path, seed)
sources.append(source)
seed += 1
task_outputs[task] = (_SAMPLE_RATE, mixture, sources)
mocked_data.append(task_outputs)
return mocked_data
class TestLibriMix(TempDirMixin, TorchaudioTestCase):
root_dir = None
samples_2spk = []
samples_3spk = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples_2spk = get_mock_dataset(cls.root_dir, 2)
cls.samples_3spk = get_mock_dataset(cls.root_dir, 3)
def _test_librimix(self, dataset, samples, task):
num_samples = 0
for i, (sample_rate, mixture, sources) in enumerate(dataset):
assert sample_rate == samples[i][task][0]
self.assertEqual(mixture, samples[i][task][1])
assert len(sources) == len(samples[i][task][2])
for j in range(len(sources)):
self.assertEqual(sources[j], samples[i][task][2][j])
num_samples += 1
assert num_samples == len(samples)
@parameterized.expand([("sep_clean"), ("enh_single",), ("enh_both",), ("sep_noisy",)])
def test_librimix_2speaker(self, task):
dataset = LibriMix(self.root_dir, num_speakers=2, sample_rate=_SAMPLE_RATE, task=task)
self._test_librimix(dataset, self.samples_2spk, task)
@parameterized.expand([("sep_clean"), ("enh_single",), ("enh_both",), ("sep_noisy",)])
def test_librimix_3speaker(self, task):
dataset = LibriMix(self.root_dir, num_speakers=3, sample_rate=_SAMPLE_RATE, task=task)
self._test_librimix(dataset, self.samples_3spk, task)
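# For reference, get_mock_dataset(root, 2) produces the on-disk layout the
# LibriMix loader expects (a sketch, under the wav8k/min/train-360 condition):
#
#   <root>/Libri2Mix/wav8k/min/train-360/
#       mix_clean/0.wav ... 9.wav
#       mix_single/0.wav ... 9.wav
#       mix_both/0.wav ... 9.wav
#       s1/0.wav ... 9.wav
#       s2/0.wav ... 9.wav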
|
import os
from parameterized import parameterized
from torchaudio.datasets import LibriMix
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 8000
_TASKS_TO_MIXTURE = {
"sep_clean": "mix_clean",
"enh_single": "mix_single",
"enh_both": "mix_both",
"sep_noisy": "mix_both",
}
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=_SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, _SAMPLE_RATE)
return wav
def get_mock_dataset(root_dir: str, num_speaker: int):
"""
root_dir: directory to the mocked dataset
"""
mocked_data = []
seed = 0
base_dir = os.path.join(root_dir, f"Libri{num_speaker}Mix", "wav8k", "min", "train-360")
os.makedirs(base_dir, exist_ok=True)
for utterance_id in range(10):
filename = f"{utterance_id}.wav"
task_outputs = {}
for task in _TASKS_TO_MIXTURE:
# Create the mixture folder. The folder name depends on the task.
mixture_folder = _TASKS_TO_MIXTURE[task]
mixture_dir = os.path.join(base_dir, mixture_folder)
os.makedirs(mixture_dir, exist_ok=True)
mixture_path = os.path.join(mixture_dir, filename)
mixture = _save_wav(mixture_path, seed)
sources = []
if task == "enh_both":
sources = [task_outputs["sep_clean"][1]]
else:
for speaker_id in range(num_speaker):
source_dir = os.path.join(base_dir, f"s{speaker_id+1}")
os.makedirs(source_dir, exist_ok=True)
source_path = os.path.join(source_dir, filename)
if os.path.exists(source_path):
sources = task_outputs["sep_clean"][2]
break
else:
source = _save_wav(source_path, seed)
sources.append(source)
seed += 1
task_outputs[task] = (_SAMPLE_RATE, mixture, sources)
mocked_data.append(task_outputs)
return mocked_data
class TestLibriMix(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples_2spk = []
samples_3spk = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples_2spk = get_mock_dataset(cls.root_dir, 2)
cls.samples_3spk = get_mock_dataset(cls.root_dir, 3)
def _test_librimix(self, dataset, samples, task):
num_samples = 0
for i, (sample_rate, mixture, sources) in enumerate(dataset):
assert sample_rate == samples[i][task][0]
self.assertEqual(mixture, samples[i][task][1])
assert len(sources) == len(samples[i][task][2])
for j in range(len(sources)):
self.assertEqual(sources[j], samples[i][task][2][j])
num_samples += 1
assert num_samples == len(samples)
@parameterized.expand([("sep_clean"), ("enh_single",), ("enh_both",), ("sep_noisy",)])
def test_librimix_2speaker(self, task):
dataset = LibriMix(self.root_dir, num_speakers=2, sample_rate=_SAMPLE_RATE, task=task)
self._test_librimix(dataset, self.samples_2spk, task)
@parameterized.expand([("sep_clean"), ("enh_single",), ("enh_both",), ("sep_noisy",)])
def test_librimix_3speaker(self, task):
dataset = LibriMix(self.root_dir, num_speakers=3, sample_rate=_SAMPLE_RATE, task=task)
self._test_librimix(dataset, self.samples_3spk, task)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.18"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
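# A minimal sketch of the quick-start flow these re-exports enable (assumes a
# ./data directory of text files and an LLM/embedding backend configured via
# Settings; SimpleDirectoryReader and VectorStoreIndex are exported in
# __all__ above):
#
#   from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
#
#   documents = SimpleDirectoryReader("./data").load_data()
#   index = VectorStoreIndex.from_documents(documents)
#   answer = index.as_query_engine().query("What does this corpus cover?")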
|
"""Init file of LlamaIndex."""
__version__ = "0.12.17"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
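# A usage sketch of the re-exported ops (assumes a Keras 3 install with any
# backend; ops.linalg.solve solves A @ x = b):
#
#   import numpy as np
#   from keras import ops
#
#   A = np.array([[3.0, 1.0], [1.0, 2.0]])
#   b = np.array([9.0, 8.0])
#   x = ops.linalg.solve(A, b)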
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lstsq
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead'
]
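# A sketch of constructing one of these heads through the MMDetection registry
# (the config below is illustrative and omits many fields a real config sets):
#
#   from mmdet.registry import MODELS
#
#   head = MODELS.build(dict(type='RetinaHead', num_classes=80, in_channels=256))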
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Defaults to True.
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
print_log(
'The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
'`init_default_scope` will force set the current '
f'default scope to "{scope}".',
logger='current',
level=logging.WARNING)
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
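# A minimal sketch of the helpers above (assumes the package re-exports them,
# as mmengine.registry does):
#
#   from mmengine.registry import count_registered_modules, init_default_scope
#
#   init_default_scope('mmengine')
#   stats = count_registered_modules(save_path=None, verbose=False)
#   print(stats['scan_date'], list(stats['registries']))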
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
import warnings
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Defaults to True.
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
warnings.warn('The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
'`init_default_scope` will force set the current '
f'default scope to "{scope}".')
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
|
from exa_py.api import (
HighlightsContentsOptions,
TextContentsOptions,
)
from langchain_exa.retrievers import ExaSearchRetriever
from langchain_exa.tools import ExaFindSimilarResults, ExaSearchResults
__all__ = [
"ExaFindSimilarResults",
"ExaSearchResults",
"ExaSearchRetriever",
"HighlightsContentsOptions",
"TextContentsOptions",
]
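# A minimal usage sketch of the exported retriever (assumes EXA_API_KEY is set
# in the environment; `k` bounds the number of returned results):
#
#   from langchain_exa import ExaSearchRetriever
#
#   retriever = ExaSearchRetriever(k=3)
#   docs = retriever.invoke("recent work on retrieval-augmented generation")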
|
from exa_py.api import (  # type: ignore[import-not-found]
HighlightsContentsOptions,
TextContentsOptions,
)
from langchain_exa.retrievers import ExaSearchRetriever
from langchain_exa.tools import ExaFindSimilarResults, ExaSearchResults
__all__ = [
"ExaSearchResults",
"ExaSearchRetriever",
"HighlightsContentsOptions",
"TextContentsOptions",
"ExaFindSimilarResults",
]
|
import os
from unittest.mock import patch
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = "foo"
@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("openai")
def test_embed_documents_with_custom_chunk_size() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
mock_create.assert_any_call(input=[[1342, 19]], **embeddings._invocation_params)
mock_create.assert_any_call(input=[[1342, 19]], **embeddings._invocation_params)
|
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
|
import os
from typing import Any, Dict, Generator, Literal, Optional
import requests
import sseclient
from llama_index.core.llms import (
CompletionResponse,
CompletionResponseGen,
CustomLLM,
LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.bridge.pydantic import Field
SMART_ENDPOINT = "https://chat-api.you.com/smart"
RESEARCH_ENDPOINT = "https://chat-api.you.com/research"
def _request(base_url: str, api_key: str, **kwargs) -> Dict[str, Any]:
"""
NOTE: This function can be replaced by an OpenAPI-generated Python SDK in the future,
for better input/output typing support.
"""
headers = {"x-api-key": api_key}
response = requests.post(base_url, headers=headers, json=kwargs)
response.raise_for_status()
return response.json()
def _request_stream(
base_url: str, api_key: str, **kwargs
) -> Generator[str, None, None]:
headers = {"x-api-key": api_key}
params = dict(**kwargs, stream=True)
response = requests.post(base_url, headers=headers, stream=True, json=params)
response.raise_for_status()
client = sseclient.SSEClient(response)
for event in client.events():
if event.event in ("search_results", "done"):
pass
elif event.event == "token":
yield event.data
elif event.event == "error":
raise ValueError(f"Error in response: {event.data}")
else:
raise NotImplementedError(f"Unknown event type {event.event}")
class You(CustomLLM):
"""
Wrapper around You.com's conversational Smart and Research APIs.
Each API endpoint is designed to generate conversational
responses to a variety of query types, including inline citations
and web results when relevant.
Smart Mode:
- Quick, reliable answers for a variety of questions
- Cites the entire web page URL
Research Mode:
- In-depth answers with extensive citations for a variety of questions
- Cites the specific web page snippet relevant to the claim
Connecting to the You.com API requires an API key, which
you can get at https://api.you.com.
For more information, check out the documentation at
https://documentation.you.com/api-reference/.
Args:
mode: You.com conversational endpoints. Choose from "smart" or "research"
ydc_api_key: You.com API key, if `YDC_API_KEY` is not set in the environment
"""
mode: Literal["smart", "research"] = Field(
"smart",
description='You.com conversational endpoints. Choose from "smart" or "research"',
)
ydc_api_key: Optional[str] = Field(
None,
description="You.com API key, if `YDC_API_KEY` is not set in the envrioment",
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
model_name=f"you.com-{self.mode}",
is_chat_model=True,
is_function_calling_model=False,
)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
response = _request(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
return CompletionResponse(text=response["answer"], raw=response)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
response = _request_stream(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
completion = ""
for token in response:
completion += token
yield CompletionResponse(text=completion, delta=token)
@property
def endpoint(self) -> str:
if self.mode == "smart":
return SMART_ENDPOINT
return RESEARCH_ENDPOINT
@property
def _api_key(self) -> str:
return self.ydc_api_key or os.environ["YDC_API_KEY"]
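# A minimal usage sketch of the class above (assumes YDC_API_KEY is set in the
# environment, per the _api_key fallback):
#
#   llm = You(mode="smart")
#   print(llm.complete("What is retrieval-augmented generation?").text)
#
#   for chunk in llm.stream_complete("Summarize Smart vs Research mode"):
#       print(chunk.delta, end="")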
|
import os
from typing import Any, Dict, Generator, Literal, Optional
import requests
import sseclient
from llama_index.core.llms import (
CompletionResponse,
CompletionResponseGen,
CustomLLM,
LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.bridge.pydantic import Field
SMART_ENDPOINT = "https://chat-api.you.com/smart"
RESEARCH_ENDPOINT = "https://chat-api.you.com/research"
def _request(base_url: str, api_key: str, **kwargs) -> Dict[str, Any]:
"""
NOTE: This function can be replaced by an OpenAPI-generated Python SDK in the future,
for better input/output typing support.
"""
headers = {"x-api-key": api_key}
response = requests.post(base_url, headers=headers, json=kwargs)
response.raise_for_status()
return response.json()
def _request_stream(
base_url: str, api_key: str, **kwargs
) -> Generator[str, None, None]:
headers = {"x-api-key": api_key}
params = dict(**kwargs, stream=True)
response = requests.post(base_url, headers=headers, stream=True, json=params)
response.raise_for_status()
client = sseclient.SSEClient(response)
for event in client.events():
if event.event in ("search_results", "done"):
pass
elif event.event == "token":
yield event.data
elif event.event == "error":
raise ValueError(f"Error in response: {event.data}")
else:
raise NotImplementedError(f"Unknown event type {event.event}")
class You(CustomLLM):
"""Wrapper around You.com's conversational Smart and Research APIs.
Each API endpoint is designed to generate conversational
responses to a variety of query types, including inline citations
and web results when relevant.
Smart Mode:
- Quick, reliable answers for a variety of questions
- Cites the entire web page URL
Research Mode:
- In-depth answers with extensive citations for a variety of questions
- Cites the specific web page snippet relevant to the claim
Connecting to the You.com API requires an API key, which
you can get at https://api.you.com.
For more information, check out the documentation at
https://documentation.you.com/api-reference/.
Args:
mode: You.com conversational endpoints. Choose from "smart" or "research"
ydc_api_key: You.com API key, if `YDC_API_KEY` is not set in the environment
"""
mode: Literal["smart", "research"] = Field(
"smart",
description='You.com conversational endpoints. Choose from "smart" or "research"',
)
ydc_api_key: Optional[str] = Field(
None,
description="You.com API key, if `YDC_API_KEY` is not set in the envrioment",
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
model_name=f"you.com-{self.mode}",
is_chat_model=True,
is_function_calling_model=False,
)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
response = _request(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
return CompletionResponse(text=response["answer"], raw=response)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
response = _request_stream(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
completion = ""
for token in response:
completion += token
yield CompletionResponse(text=completion, delta=token)
@property
def endpoint(self) -> str:
if self.mode == "smart":
return SMART_ENDPOINT
return RESEARCH_ENDPOINT
@property
def _api_key(self) -> str:
return self.ydc_api_key or os.environ["YDC_API_KEY"]
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
:param value: The doc to be appended.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
for value in values:
self.append(value)
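# A sketch of the sequence semantics this mixin provides (assumes a concrete
# backend implementing _set_doc_by_id/_get_doc_by_id and _offset2ids):
#
#   from docarray import Document, DocumentArray
#
#   da = DocumentArray()
#   da.append(Document(text='hello'))
#   da.insert(0, Document(text='world'))
#   assert len(da) == 2 and bool(da)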
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
:param value: The doc to be appended.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document']) -> None:
for value in values:
self.append(value)
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule, get_data_module
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(str(args.sp_model_path))
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
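# An illustrative invocation (paths are placeholders; launch under your
# cluster's scheduler so the node/GPU counts match --nodes and --gpus):
#
#   python train.py \
#       --librispeech-path /path/to/librispeech \
#       --sp-model-path /path/to/spm.model \
#       --global-stats-path ./global_stats.json \
#       --exp-dir ./exp --nodes 4 --gpus 8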
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule, get_data_module
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(str(args.sp_model_path))
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
import datetime
import autogpt_libs.auth as autogpt_auth_lib
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.v2.library.model as library_model
from backend.server.v2.library.routes import router as library_router
app = fastapi.FastAPI()
app.include_router(library_router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middleware
app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id
def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
library_model.LibraryAgent(
id="test-agent-1",
agent_id="test-agent-1",
agent_version=1,
preset_id="preset-1",
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
is_favorite=False,
is_created_by_user=True,
is_latest_version=True,
name="Test Agent 1",
description="Test Description 1",
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
library_model.LibraryAgent(
id="test-agent-2",
agent_id="test-agent-2",
agent_version=1,
preset_id="preset-2",
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
is_favorite=False,
is_created_by_user=False,
is_latest_version=True,
name="Test Agent 2",
description="Test Description 2",
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.return_value = mocked_value
response = client.get("/agents")
assert response.status_code == 200
data = [
library_model.LibraryAgent.model_validate(agent) for agent in response.json()
]
assert len(data) == 2
assert data[0].agent_id == "test-agent-1"
assert data[0].is_created_by_user is True
assert data[1].agent_id == "test-agent-2"
assert data[1].is_created_by_user is False
mock_db_call.assert_called_once_with("test-user-id")
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents")
assert response.status_code == 500
mock_db_call.assert_called_once_with("test-user-id")
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
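# Note: the dependency_overrides mapping used above is FastAPI's standard test
# seam; it swaps real dependencies for fakes so every route resolves the user
# "test-user-id" without invoking real auth middleware:
#
#   app.dependency_overrides[real_dependency] = fake_dependency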
|
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
app = fastapi.FastAPI()
app.include_router(backend.server.v2.library.routes.router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
override_auth_middleware
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
backend.server.v2.library.model.LibraryAgent(
id="test-agent-1",
version=1,
is_active=True,
name="Test Agent 1",
description="Test Description 1",
isCreatedByUser=True,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
backend.server.v2.library.model.LibraryAgent(
id="test-agent-2",
version=1,
is_active=True,
name="Test Agent 2",
description="Test Description 2",
isCreatedByUser=False,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.return_value = mocked_value
response = client.get("/agents")
assert response.status_code == 200
data = [
backend.server.v2.library.model.LibraryAgent.model_validate(agent)
for agent in response.json()
]
assert len(data) == 2
assert data[0].id == "test-agent-1"
assert data[0].isCreatedByUser is True
assert data[1].id == "test-agent-2"
assert data[1].isCreatedByUser is False
mock_db_call.assert_called_once_with("test-user-id")
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents")
assert response.status_code == 500
mock_db_call.assert_called_once_with("test-user-id")
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import pairwise_cos_sim, pairwise_euclidean_sim, pairwise_manhattan_sim
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - pairwise_cos_sim(x, y)
EUCLIDEAN = lambda x, y: pairwise_euclidean_sim(x, y)
MANHATTAN = lambda x, y: pairwise_manhattan_sim(x, y)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
"""
Compute the triplet loss from embeddings.
Args:
embeddings: List of (anchor, positive, negative) embeddings
labels: Unused; the triplet loss requires no labels
Returns:
Loss value
"""
rep_anchor, rep_pos, rep_neg = embeddings
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import pairwise_cos_sim, pairwise_euclidean_sim, pairwise_manhattan_sim
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - pairwise_cos_sim(x, y)
EUCLIDEAN = lambda x, y: pairwise_euclidean_sim(x, y)
MANHATTAN = lambda x, y: pairwise_manhattan_sim(x, y)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
"""Run smoke tests"""
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
from torchaudio.prototype.io import Streamer # noqa: F401
|
"""Run smoke tests"""
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
        # TODO: nms_op in mmcv needs to be enhanced; the bbox results may
        # differ when rescale is not applied in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
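# --- Hedged sketch (illustrative only, not part of mmdet): the minimal
# surface a concrete subclass must fill in before ``predict`` can run.
# Real heads such as ``StandardRoIHead`` also implement ``predict_bbox``
# and ``predict_mask``, which ``predict`` calls above.
class _SketchRoIHead(BaseRoIHead):
    def init_bbox_head(self, bbox_roi_extractor, bbox_head) -> None:
        # Build the configured modules through the registry.
        self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)
        self.bbox_head = MODELS.build(bbox_head)
    def init_mask_head(self, mask_roi_extractor, mask_head) -> None:
        self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
        self.mask_head = MODELS.build(mask_head)
    def init_assigner_sampler(self) -> None:
        # A real head builds the assigner/sampler from ``self.train_cfg``.
        self.bbox_assigner = None
        self.bbox_sampler = None
    def loss(self, x, rpn_results_list, batch_data_samples):
        raise NotImplementedError('sketch only')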
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
        # TODO: nms_op in mmcv needs to be enhanced; the bbox results may
        # differ when rescale is not applied in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
# coding=utf-8
# Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...modeling_outputs import CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import logging
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3DecoderLayer,
DeepseekV3MLP,
DeepseekV3MoE,
DeepseekV3PreTrainedModel,
DeepseekV3TopkRouter,
)
from ..qwen3.modeling_qwen3 import (
Qwen3Attention,
Qwen3ForCausalLM,
Qwen3Model,
Qwen3RMSNorm,
Qwen3RotaryEmbedding,
TransformersKwargs,
)
from .configuration_dots1 import Dots1Config
logger = logging.get_logger(__name__)
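# Note: the ``pass`` subclasses below are modular-transformers stubs. The
# modular converter copies each parent implementation under the Dots1 name,
# so a body is only needed where behavior actually differs (see
# Dots1DecoderLayer, which records the per-layer attention type).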
class Dots1RMSNorm(Qwen3RMSNorm):
pass
class Dots1RotaryEmbedding(Qwen3RotaryEmbedding):
pass
class Dots1Attention(Qwen3Attention):
pass
class Dots1MLP(DeepseekV3MLP):
pass
class Dots1MoE(DeepseekV3MoE):
pass
class Dots1TopkRouter(DeepseekV3TopkRouter):
pass
class Dots1DecoderLayer(DeepseekV3DecoderLayer):
def __init__(self, config: Dots1Config, layer_idx: int):
super().__init__()
self.attention_type = config.layer_types[layer_idx]
class Dots1PreTrainedModel(DeepseekV3PreTrainedModel):
pass
class Dots1Model(Qwen3Model):
pass
class Dots1ForCausalLM(Qwen3ForCausalLM):
def forward(
self,
**super_kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Dots1ForCausalLM
>>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
return super().forward(**super_kwargs)
__all__ = [
"Dots1PreTrainedModel",
"Dots1Model",
"Dots1ForCausalLM",
]
|
# coding=utf-8
# Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...modeling_outputs import CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import logging
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3DecoderLayer,
DeepseekV3MLP,
DeepseekV3MoE,
DeepseekV3PreTrainedModel,
DeepseekV3TopkRouter,
)
from ..qwen3.modeling_qwen3 import (
KwargsForCausalLM,
Qwen3Attention,
Qwen3ForCausalLM,
Qwen3Model,
Qwen3RMSNorm,
Qwen3RotaryEmbedding,
)
from .configuration_dots1 import Dots1Config
logger = logging.get_logger(__name__)
class Dots1RMSNorm(Qwen3RMSNorm):
pass
class Dots1RotaryEmbedding(Qwen3RotaryEmbedding):
pass
class Dots1Attention(Qwen3Attention):
pass
class Dots1MLP(DeepseekV3MLP):
pass
class Dots1MoE(DeepseekV3MoE):
pass
class Dots1TopkRouter(DeepseekV3TopkRouter):
pass
class Dots1DecoderLayer(DeepseekV3DecoderLayer):
def __init__(self, config: Dots1Config, layer_idx: int):
super().__init__()
self.attention_type = config.layer_types[layer_idx]
class Dots1PreTrainedModel(DeepseekV3PreTrainedModel):
pass
class Dots1Model(Qwen3Model):
pass
class Dots1ForCausalLM(Qwen3ForCausalLM):
def forward(
self,
**super_kwargs: Unpack[KwargsForCausalLM],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Dots1ForCausalLM
>>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
return super().forward(**super_kwargs)
__all__ = [
"Dots1PreTrainedModel",
"Dots1Model",
"Dots1ForCausalLM",
]
|
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-4)
|
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L-2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-4)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.optimizers import legacy as legacy
from keras.optimizers import schedules as schedules
from keras.src.optimizers import deserialize as deserialize
from keras.src.optimizers import get as get
from keras.src.optimizers import serialize as serialize
from keras.src.optimizers.adadelta import Adadelta as Adadelta
from keras.src.optimizers.adafactor import Adafactor as Adafactor
from keras.src.optimizers.adagrad import Adagrad as Adagrad
from keras.src.optimizers.adam import Adam as Adam
from keras.src.optimizers.adamax import Adamax as Adamax
from keras.src.optimizers.adamw import AdamW as AdamW
from keras.src.optimizers.ftrl import Ftrl as Ftrl
from keras.src.optimizers.lamb import Lamb as Lamb
from keras.src.optimizers.lion import Lion as Lion
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
from keras.src.optimizers.muon import Muon as Muon
from keras.src.optimizers.nadam import Nadam as Nadam
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.optimizers.rmsprop import RMSprop as RMSprop
from keras.src.optimizers.sgd import SGD as SGD
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lamb import Lamb
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.muon import Muon
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with 'if __name__ == "__main__"'. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode(sentences, batch_size=encode_batch_size, pool=pool, chunk_size=chunk_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with 'if __name__ == "__main__"'. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDocument):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLoss2PPScheme,
NDCGLoss2Scheme,
NoWeightingScheme,
)
from .ListNetLoss import ListNetLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"ListNetLoss",
"LambdaLoss",
"NoWeightingScheme",
"NDCGLoss1Scheme",
"NDCGLoss2Scheme",
"LambdaRankScheme",
"NDCGLoss2PPScheme",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .ListNetLoss import ListNetLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"ListNetLoss",
]
|
import importlib
from typing import Any
from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline
from langchain.retrievers.document_compressors.chain_extract import (
LLMChainExtractor,
)
from langchain.retrievers.document_compressors.chain_filter import (
LLMChainFilter,
)
from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank
from langchain.retrievers.document_compressors.cross_encoder_rerank import (
CrossEncoderReranker,
)
from langchain.retrievers.document_compressors.embeddings_filter import (
EmbeddingsFilter,
)
from langchain.retrievers.document_compressors.listwise_rerank import (
LLMListwiseRerank,
)
_module_lookup = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"DocumentCompressorPipeline",
"EmbeddingsFilter",
"FlashrankRerank",
"LLMListwiseRerank",
"LLMChainExtractor",
"LLMChainFilter",
"CohereRerank",
"CrossEncoderReranker",
]
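# --- Hedged sketch (illustrative only): the PEP 562 lazy-import pattern used
# above, reduced to its essence. The mapping below is hypothetical; the real
# ``__getattr__`` defers importing langchain_community until first access.
def _lazy_demo(name: str) -> Any:
    _demo_lookup = {"sqrt": "math"}  # hypothetical name -> module mapping
    if name in _demo_lookup:
        return getattr(importlib.import_module(_demo_lookup[name]), name)
    raise AttributeError(name)
# Usage: _lazy_demo("sqrt")(9.0) == 3.0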
|
import importlib
from typing import Any
from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline
from langchain.retrievers.document_compressors.chain_extract import (
LLMChainExtractor,
)
from langchain.retrievers.document_compressors.chain_filter import (
LLMChainFilter,
)
from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank
from langchain.retrievers.document_compressors.cross_encoder_rerank import (
CrossEncoderReranker,
)
from langchain.retrievers.document_compressors.embeddings_filter import (
EmbeddingsFilter,
)
from langchain.retrievers.document_compressors.listwise_rerank import (
LLMListwiseRerank,
)
_module_lookup = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"DocumentCompressorPipeline",
"EmbeddingsFilter",
"LLMListwiseRerank",
"LLMChainExtractor",
"LLMChainFilter",
"CohereRerank",
"CrossEncoderReranker",
] + list(_module_lookup.keys())
|
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
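# norm_decay_mult=0 disables weight decay on normalization layers;
# bypass_duplicate=True skips parameters shared by multiple modules.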
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument, Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './atss_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='RandomResize', img_scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='RandomResize', img_scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# TODO: use repeat dataset wrapper
# training schedule for 3x
train_cfg = dict(by_epoch=True, max_epochs=36)
val_cfg = dict(interval=3)
test_cfg = dict()
# learning rate
# Experiments show that using milestones=[27, 33] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
"""Document transformers."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
from langchain_core.runnables.config import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.documents import Document
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
.. code-block:: python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
""" # noqa: E501
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
)
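# --- Hedged sketch (illustrative only): the smallest concrete transformer.
# It upper-cases each document's text; ``atransform_documents`` is inherited
# and runs this synchronous implementation in an executor.
class _UppercaseTransformer(BaseDocumentTransformer):
    def transform_documents(self, documents, **kwargs):
        from langchain_core.documents import Document  # runtime import for the sketch
        return [
            Document(page_content=doc.page_content.upper(), metadata=doc.metadata)
            for doc in documents
        ]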
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
from langchain_core.runnables.config import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.documents import Document
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
.. code-block:: python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
""" # noqa: E501
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
)
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.8.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.7.0"
@keras_export("keras.version")
def version():
return __version__
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import gzip
import json
import os
import time
import torch
from sentence_transformers import SentenceTransformer, util
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English Wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import json
from sentence_transformers import SentenceTransformer, util
import time
import gzip
import os
import torch
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = 'nq-distilbert-base-v1'
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English Wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = 'data/simplewiki-2020-11-01.jsonl.gz'
if not os.path.exists(wikipedia_filepath):
util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz', wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, 'rt', encoding='utf8') as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data['paragraphs']:
# We encode the passages as [title, text]
passages.append([data['title'], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == 'nq-distilbert-base-v1':
embeddings_filepath = 'simplewiki-2020-11-01-nq-distilbert-base-v1.pt'
if not os.path.exists(embeddings_filepath):
util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt', embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
if torch.cuda.is_available():
corpus_embeddings = corpus_embeddings.to('cuda')
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit['score'], passages[hit['corpus_id']]))
print("\n\n========\n")
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
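# Hedged usage note (editorial, not part of the upstream file): detectors
# registered via ``@MODELS.register_module()`` are normally instantiated from
# a config dict through the registry rather than constructed directly, e.g.
#
#     from mmdet.registry import MODELS
#     detector = MODELS.build(
#         dict(type='ATSS', backbone=..., neck=..., bbox_head=...))
#
# where each ``...`` stands for a full backbone/neck/head config dict.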
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torch
import torchaudio
@torchaudio._extension.fail_if_no_sox
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@torchaudio._extension.fail_if_no_sox
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@torchaudio._extension.fail_if_no_sox
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@torchaudio._extension.fail_if_no_sox
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@torchaudio._extension.fail_if_no_sox
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@torchaudio._extension.fail_if_no_sox
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@torchaudio._extension.fail_if_no_sox
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@torchaudio._extension.fail_if_no_sox
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
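if __name__ == "__main__":
    # Hedged usage sketch (editorial, not part of the upstream module):
    # exercises the configuration API defined above. Assumes torchaudio was
    # built with sox support; otherwise the decorators above raise.
    set_verbosity(1)  # only report failure messages
    set_seed(0)  # make sox's PRNG deterministic
    set_buffer_size(8192)  # processing buffer size in bytes
    print("buffer size:", get_buffer_size())
    print("some effects:", sorted(list_effects())[:5])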
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@_mod_utils.requires_sox()
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@_mod_utils.requires_sox()
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@_mod_utils.requires_sox()
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@_mod_utils.requires_sox()
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@_mod_utils.requires_sox()
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@_mod_utils.requires_sox()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@_mod_utils.requires_sox()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
|
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""
DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition*
:cite:`hannun2014deep`.
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
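if __name__ == "__main__":
    # Hedged usage sketch (editorial, not part of the upstream file): trace
    # the tensor shapes documented in ``forward``. The feature size 80 and
    # the batch/time sizes are arbitrary assumptions for illustration.
    model = DeepSpeech(n_feature=80)
    dummy = torch.zeros(4, 1, 100, 80)  # (batch, channel, time, feature)
    out = model(dummy)
    print(out.shape)  # torch.Size([4, 100, 40]) -> (batch, time, class)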
|
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""
DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition*
[:footcite:`hannun2014deep`].
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
|
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding, TorchEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
from docarray.typing.tensor.torch_tensor import TorchTensor
__all__ = [
'NdArray',
'TorchTensor',
'Tensor',
'Embedding',
'TorchEmbedding',
'NdArrayEmbedding',
]
|
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
from docarray.typing.tensor.torch_tensor import TorchTensor
__all__ = ['NdArray', 'TorchTensor', 'Tensor']
|
from .normalizer import ImageNormalizer
|
from .normalizer import ImageNormalizer
|
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ..utils import deprecate, logging
from .controlnets.controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class FluxControlNetOutput(FluxControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxControlNetOutput` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetOutput`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class FluxControlNetModel(FluxControlNetModel):
def __init__(
self,
patch_size: int = 1,
in_channels: int = 64,
num_layers: int = 19,
num_single_layers: int = 38,
attention_head_dim: int = 128,
num_attention_heads: int = 24,
joint_attention_dim: int = 4096,
pooled_projection_dim: int = 768,
guidance_embeds: bool = False,
axes_dims_rope: List[int] = [16, 56, 56],
num_mode: int = None,
conditioning_embedding_channels: int = None,
):
deprecation_message = "Importing `FluxControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetModel", "0.34", deprecation_message)
super().__init__(
patch_size=patch_size,
in_channels=in_channels,
num_layers=num_layers,
num_single_layers=num_single_layers,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
joint_attention_dim=joint_attention_dim,
pooled_projection_dim=pooled_projection_dim,
guidance_embeds=guidance_embeds,
axes_dims_rope=axes_dims_rope,
num_mode=num_mode,
conditioning_embedding_channels=conditioning_embedding_channels,
)
class FluxMultiControlNetModel(FluxMultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxMultiControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxMultiControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxMultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ..utils import deprecate, logging
from .controlnets.controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class FluxControlNetOutput(FluxControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxControlNetOutput` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetOutput`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class FluxControlNetModel(FluxControlNetModel):
def __init__(
self,
patch_size: int = 1,
in_channels: int = 64,
num_layers: int = 19,
num_single_layers: int = 38,
attention_head_dim: int = 128,
num_attention_heads: int = 24,
joint_attention_dim: int = 4096,
pooled_projection_dim: int = 768,
guidance_embeds: bool = False,
axes_dims_rope: List[int] = [16, 56, 56],
num_mode: int = None,
conditioning_embedding_channels: int = None,
):
deprecation_message = "Importing `FluxControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetModel", "0.34", deprecation_message)
super().__init__(
patch_size=patch_size,
in_channels=in_channels,
num_layers=num_layers,
num_single_layers=num_single_layers,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
joint_attention_dim=joint_attention_dim,
pooled_projection_dim=pooled_projection_dim,
guidance_embeds=guidance_embeds,
axes_dims_rope=axes_dims_rope,
num_mode=num_mode,
conditioning_embedding_channels=conditioning_embedding_channels,
)
class FluxMultiControlNetModel(FluxMultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxMultiControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxMultiControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxMultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Optional[Dict] = None
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
documents.append(Document(text=response, id_=url, metadata=metadata or {}))
return documents
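if __name__ == "__main__":
    # Hedged usage sketch (editorial, not part of the upstream file): fetch a
    # single page and attach URL metadata. The URL is an arbitrary example;
    # running this needs network access and the `html2text` package.
    reader = SimpleWebPageReader(
        html_to_text=True,
        metadata_fn=lambda url: {"source": url},
    )
    docs = reader.load_data(["https://example.com"])
    print(len(docs), docs[0].metadata)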
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Optional[Dict] = None
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
documents.append(Document(text=response, id_=url, metadata=metadata or {}))
return documents
|
"""Sentence window retriever."""
from typing import Any, Dict, List
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import (
SentenceWindowNodeParser,
)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.schema import Document
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
class SentenceWindowRetrieverPack(BaseLlamaPack):
"""
Sentence Window Retriever pack.
Build input nodes from a text file by inserting metadata,
build a vector index over the input nodes,
then after retrieval insert the text into the output nodes
before synthesis.
"""
def __init__(
self,
docs: List[Document] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# create the sentence window node parser w/ default settings
self.node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
self.embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
Settings.llm = self.llm
Settings.embed_model = self.embed_model
# extract nodes
nodes = self.node_parser.get_nodes_from_documents(docs)
self.sentence_index = VectorStoreIndex(nodes)
self.postprocessor = MetadataReplacementPostProcessor(
target_metadata_key="window"
)
self.query_engine = self.sentence_index.as_query_engine(
similarity_top_k=2,
# the target key defaults to `window` to match the node_parser's default
node_postprocessors=[self.postprocessor],
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"sentence_index": self.sentence_index,
"node_parser": self.node_parser,
"postprocessor": self.postprocessor,
"llm": self.llm,
"embed_model": self.embed_model,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
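if __name__ == "__main__":
    # Hedged usage sketch (editorial, not part of the upstream file): build
    # the pack over a toy document and run one query. Requires an OpenAI API
    # key and downloads the HuggingFace embedding model on first use.
    docs = [Document(text="The quick brown fox jumps over the lazy dog.")]
    pack = SentenceWindowRetrieverPack(docs=docs)
    print(pack.run("What does the fox do?"))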
|
"""Sentence window retriever."""
from typing import Any, Dict, List
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import (
SentenceWindowNodeParser,
)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.schema import Document
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
class SentenceWindowRetrieverPack(BaseLlamaPack):
"""Sentence Window Retriever pack.
Build input nodes from a text file by inserting metadata,
build a vector index over the input nodes,
then after retrieval insert the text into the output nodes
before synthesis.
"""
def __init__(
self,
docs: List[Document] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# create the sentence window node parser w/ default settings
self.node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
self.embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
Settings.llm = self.llm
Settings.embed_model = self.embed_model
# extract nodes
nodes = self.node_parser.get_nodes_from_documents(docs)
self.sentence_index = VectorStoreIndex(nodes)
self.postprocessor = MetadataReplacementPostProcessor(
target_metadata_key="window"
)
self.query_engine = self.sentence_index.as_query_engine(
similarity_top_k=2,
# the target key defaults to `window` to match the node_parser's default
node_postprocessors=[self.postprocessor],
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"sentence_index": self.sentence_index,
"node_parser": self.node_parser,
"postprocessor": self.postprocessor,
"llm": self.llm,
"embed_model": self.embed_model,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.data_elements.bbox import bbox2result
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = MODELS.build(segm_head)
self.mask_head = MODELS.build(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = MODELS.build(segm_head)
self.mask_head = MODELS.build(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
# Disable yapf because it conflicts with isort.
# yapf: disable
from .misc import (align_tensor, aligned_bilinear, center_of_mass,
empty_instances, filter_gt_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
images_to_levels, interpolate_as, levels_to_images,
mask2ndarray, multi_apply, relative_coordinate_maps,
rename_loss_dict, reweight_loss_dict,
samplelist_boxtype2tensor, select_single_mlvl,
sigmoid_geometric_mean, unfold_wo_center, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
from .wbf import weighted_boxes_fusion
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer', 'align_tensor', 'weighted_boxes_fusion'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer'
]
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.base_document import AnyDocument
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
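        # Editorial note on the value asserted below: with rate=0.3,
        # GaussianDropout multiplies each input by 1 + eps with
        # eps ~ N(0, sqrt(rate / (1 - rate))), so for an all-ones input the
        # output standard deviation should approach sqrt(0.3 / 0.7).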
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
|
import torch
from keras.src.optimizers.base_optimizer import BaseOptimizer
from keras.src.utils import torch_utils
class TorchParallelOptimizer(BaseOptimizer):
@torch_utils.no_grad
def _backend_update_step(self, grads, trainable_variables, learning_rate):
self._parallel_update_step(
grads,
trainable_variables,
learning_rate,
)
@torch_utils.no_grad
def _backend_reset_gradient_accumulators(self):
acc_list = [
v.value for v in self._accumulated_gradients if v is not None
]
torch._foreach_mul_(acc_list, 0.0)
@torch_utils.no_grad
def _backend_increment_gradient_accumulators(self, grads, acc_grads):
acc_list = [v.value for v in acc_grads]
torch._foreach_add_(acc_list, grads, alpha=1.0)
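if __name__ == "__main__":
    # Hedged sketch (editorial, not part of the upstream file): the
    # ``torch._foreach_*`` ops used above apply one fused in-place update
    # across a whole list of tensors instead of looping in Python.
    a = [torch.ones(2), torch.ones(3)]
    b = [torch.full((2,), 2.0), torch.full((3,), 2.0)]
    torch._foreach_add_(a, b, alpha=0.5)  # a[i] += 0.5 * b[i]
    print(a)  # [tensor([2., 2.]), tensor([2., 2., 2.])]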
|
import torch
from keras.src.optimizers.base_optimizer import BaseOptimizer
from keras.src.utils import torch_utils
class TorchParallelOptimizer(BaseOptimizer):
@torch_utils.no_grad
def _backend_update_step(self, grads, trainable_variables, learning_rate):
self._parallel_update_step(
grads,
trainable_variables,
learning_rate,
)
@torch_utils.no_grad
def _backend_reset_gradient_accumulators(self):
acc_list = [v.value for v in self._accumulated_gradients]
torch._foreach_mul_(acc_list, 0.0)
@torch_utils.no_grad
def _backend_increment_gradient_accumulators(self, grads, acc_grads):
acc_list = [v.value for v in acc_grads]
torch._foreach_add_(acc_list, grads, alpha=1.0)
|
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
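if __name__ == "__main__":
    # Hedged sketch (editorial, not part of the upstream file): the
    # ``jax.lax.cond`` pattern used above selects between two branches without
    # Python control flow, which is what lets the accumulation logic run under
    # tracing. Both branches must return pytrees of identical structure.
    def accumulate(step, grad, acc, every=4):
        is_update = (step + 1) % every == 0
        return jax.lax.cond(
            is_update,
            lambda: jnp.zeros_like(acc),  # reset the accumulator on update steps
            lambda: acc + grad,  # otherwise keep accumulating
        )

    acc = jnp.zeros(3)
    for step in range(5):
        acc = accumulate(step, jnp.ones(3), acc)
        print(step, acc)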
|
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
self._iterations.assign_add(1)
|
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization, e.g. ``task``.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
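if __name__ == "__main__":
    # Hedged sketch (editorial, not part of the upstream file): the smallest
    # shape a concrete input module can take, assuming ``forward``, ``save``,
    # and ``tokenize`` are the only abstract requirements, as the docstring
    # above states. The whitespace tokenizer is a stand-in assumption, not a
    # real tokenizer.
    class WhitespaceInputModule(InputModule):
        def tokenize(self, texts: list[str], **kwargs) -> dict[str, Any]:
            vocab: dict[str, int] = {}
            ids = [
                [vocab.setdefault(tok, len(vocab)) for tok in text.split()]
                for text in texts
            ]
            return {"input_ids": ids}

        def forward(self, features: dict[str, Any]) -> dict[str, Any]:
            return features  # identity pass-through for illustration

        def save(self, output_path: str, **kwargs) -> None:
            pass  # nothing to persist in this sketch

    module = WhitespaceInputModule()
    print(module.tokenize(["hello world", "hello again"]))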
|
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
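# A hedged, illustrative sketch (not part of the library): a minimal InputModule
# subclass that implements the abstract `tokenize` with a Hugging Face tokenizer.
# `MyInputModule` and the model name are hypothetical; `forward` and `save` are
# omitted here, so the class cannot be instantiated until they are implemented.
from transformers import AutoTokenizer


class MyInputModule(InputModule):
    def __init__(self, model_name: str = "bert-base-uncased") -> None:
        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

    def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
        # Pad and truncate to a rectangular batch, returning PyTorch tensors.
        return self.tokenizer(
            texts, padding=True, truncation=True, return_tensors="pt"
        )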
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[Image] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_update_complex(doc1, doc2):
doc1.update(doc2)
# doc1 is changed in place (no extra memory)
assert doc1.text == 'hey here 2'
assert doc1.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(doc1.matches) == 2
assert doc1.opt_int == 5
assert doc1.price == 5
assert doc1.test_set == {'a', 'b'}
assert len(doc1.matches_with_same_id) == 1
assert len(doc1.matches_with_same_id[0].matches) == 2
assert doc1.inner_doc.integer == 3
assert doc1.inner_doc.inner_list == ['c', 'd', 'a', 'b']
assert doc1.test_dict == {'a': 10, 'b': 10, 'c': 3, 'd': 4, 'z': None}
def test_update_simple():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
my_doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
my_doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
my_doc1.update(my_doc2)
assert my_doc1.content == 'Core content updated'
assert my_doc1.title == 'Title'
assert my_doc1.tags_ == ['python', 'AI', 'docarray']
def test_update_different_schema_fails():
class DocA(BaseDocument):
content: str
class DocB(BaseDocument):
image: Optional[Image] = None
docA = DocA(content='haha')
docB = DocB()
with pytest.raises(Exception):
docA.update(docB)
|
import pytest
from typing import Optional, List, Dict, Set
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
class InnerDoc(BaseDocument):
integer: int
l: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[Image] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, l=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, l=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_update_complex(doc1, doc2):
doc1.update(doc2)
# doc1 is changed in place (no extra memory)
assert doc1.text == 'hey here 2'
assert doc1.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(doc1.matches) == 2
assert doc1.opt_int == 5
assert doc1.price == 5
assert doc1.test_set == {'a', 'b'}
assert len(doc1.matches_with_same_id) == 1
assert len(doc1.matches_with_same_id[0].matches) == 2
assert doc1.inner_doc.integer == 3
assert doc1.inner_doc.l == ['c', 'd', 'a', 'b']
assert doc1.test_dict == {'a': 10, 'b': 10, 'c': 3, 'd': 4, 'z': None}
def test_update_simple():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
my_doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
my_doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
my_doc1.update(my_doc2)
assert my_doc1.content == 'Core content updated'
assert my_doc1.title == 'Title'
assert my_doc1.tags_ == ['python', 'AI', 'docarray']
def test_update_different_schema_fails():
class DocA(BaseDocument):
content: str
class DocB(BaseDocument):
image: Optional[Image] = None
docA = DocA(content='haha')
docB = DocB()
with pytest.raises(Exception):
docA.update(docB)
|
import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
        :param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
from PIL import Image
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = Image.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
from PIL import Image
np_array = self.get_comp_backend().to_numpy(self)
img = Image.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
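# A hedged usage sketch (not part of the original file), assuming docarray's concrete
# `ImageNdArray` subclass and a placeholder RGB uint8 array.
if __name__ == '__main__':
    import numpy as np
    from pydantic import parse_obj_as

    from docarray.typing import ImageNdArray

    tensor = parse_obj_as(ImageNdArray, np.zeros((32, 32, 3), dtype=np.uint8))
    png_bytes = tensor.to_bytes(format='PNG')  # PIL encodes the array as PNG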
|
import io
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
        :param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
from PIL import Image
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = Image.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (does not support LMDB or Memcache yet)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: Use backend_args, file_client_args in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'],
backend_args=backend_args),
dict(
type='CityScapesMetric',
seg_prefix=data_root + 'gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance',
backend_args=backend_args)
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
MODULE2PACKAGE = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
}
# PKG2PROJECT is not a proper name for representing the mapping between the module
# name (what a module is imported as) and the package name (used by pip install).
# Therefore, PKG2PROJECT will be deprecated and this alias will only be kept until
# MMEngine v1.0.0
PKG2PROJECT = MODULE2PACKAGE
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get an external package '
            'config; please specify the package name and the relative config '
            'path, like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in MODULE2PACKAGE, (
        f'mmengine does not support loading {package} config.')
package = MODULE2PACKAGE[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
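# A hedged, illustrative sketch of the 'package::config' convention parsed above;
# the config name is the same example used in the error message.
if __name__ == '__main__':
    package, cfg = _get_package_and_cfg_path(
        'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
    assert package == 'mmdet'  # module name mapped through MODULE2PACKAGE
    assert cfg == 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'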
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get an external package '
            'config; please specify the package name and the relative config '
            'path, like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
    assert package in PKG2PROJECT, 'mmengine does not support loading ' \
f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.5.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
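# A hedged, illustrative sketch of the public API re-exported above; the dataset
# name is a placeholder for any dataset on the Hugging Face Hub.
if __name__ == "__main__":
    ds = load_dataset("imdb", split="train")
    print(ds[0])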
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.5.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')
def build_assigner(cfg, **default_args):
"""Builder of box assigner."""
return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
def build_sampler(cfg, **default_args):
"""Builder of box sampler."""
return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
def build_bbox_coder(cfg, **default_args):
"""Builder of box coder."""
return build_from_cfg(cfg, BBOX_CODERS, default_args)
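# A hedged usage sketch: building an assigner from a config dict, assuming an
# assigner class has been registered under the name 'MaxIoUAssigner' elsewhere
# (e.g. via `@BBOX_ASSIGNERS.register_module()`); the thresholds are placeholders.
if __name__ == '__main__':
    assigner = build_assigner(
        dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4))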
|
from mmcv.utils import Registry, build_from_cfg
BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')
def build_assigner(cfg, **default_args):
"""Builder of box assigner."""
return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
def build_sampler(cfg, **default_args):
"""Builder of box sampler."""
return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
def build_bbox_coder(cfg, **default_args):
"""Builder of box coder."""
return build_from_cfg(cfg, BBOX_CODERS, default_args)
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.AutoContrast,
init_kwargs={
"value_range": (20, 200),
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype="float32",
)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype="uint8")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertAllClose(
ops.convert_to_numpy(ys[0]), np.array([[[0.0]], [[1]]])
)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.AutoContrast,
init_kwargs={
"value_range": (20, 200),
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype="float32",
)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype="uint8")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertAllClose(
ops.convert_to_numpy(ys[0]), np.array([[[0.0]], [[1]]])
)
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.JINA],
Literal["api_key"],
]
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
description="The Jina integration can be used with an API Key.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
provider="jina",
supported_credential_types={"api_key"},
description="The Jina integration can be used with an API Key.",
)
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
import os
import urllib.parse
from typing import Dict, Optional, Union
from llama_index.core.base.llms.generic_utils import (
get_from_param_or_env,
)
# Import SecretStr directly from pydantic
# since there is not one in llama_index.core.bridge.pydantic
try:
from pydantic.v1 import SecretStr
except ImportError:
from pydantic import SecretStr
def resolve_watsonx_credentials(
*,
url: Optional[str] = None,
apikey: Optional[str] = None,
token: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
instance_id: Optional[str] = None,
) -> Dict[str, SecretStr]:
"""
    Resolve watsonx.ai credentials. If the value of a given param is None,
    try to find the corresponding environment variable.
    :raises ValueError: raised when the value of a required attribute is not found
    :return: Dictionary with resolved credential items
:rtype: Dict[str, SecretStr]
"""
creds = {}
creds["url"] = convert_to_secret_str(
get_from_param_or_env("url", url, "WATSONX_URL")
)
parsed_url = urllib.parse.urlparse(creds["url"].get_secret_value())
if parsed_url.netloc.endswith("cloud.ibm.com"):
if not (apikey or "WATSONX_APIKEY" in os.environ) and not (
token or "WATSONX_TOKEN" in os.environ
):
raise ValueError(
"Did not find 'apikey' or 'token',"
" please add an environment variable"
" `WATSONX_APIKEY` or 'WATSONX_TOKEN' "
"which contains it,"
" or pass 'apikey' or 'token'"
" as a named parameter."
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
else:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
else:
if (
not token
and "WATSONX_TOKEN" not in os.environ
and not password
and "WATSONX_PASSWORD" not in os.environ
and not apikey
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif token or "WATSONX_TOKEN" in os.environ:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
elif password or "WATSONX_PASSWORD" in os.environ:
creds["password"] = convert_to_secret_str(
get_from_param_or_env("password", password, "WATSONX_PASSWORD")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
if not instance_id or "WATSONX_INSTANCE_ID" not in os.environ:
creds["instance_id"] = convert_to_secret_str(
get_from_param_or_env("instance_id", instance_id, "WATSONX_INSTANCE_ID")
)
return creds
def convert_to_secret_str(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
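# A hedged usage sketch: resolving credentials for an IBM Cloud endpoint by passing
# parameters directly. The URL and API key are placeholders, and this assumes (as in
# the upstream implementation) that the instance_id lookup only applies to
# non-IBM-Cloud URLs.
if __name__ == "__main__":
    creds = resolve_watsonx_credentials(
        url="https://us-south.ml.cloud.ibm.com",
        apikey="placeholder-api-key",
    )
    # Values are wrapped in SecretStr so they are not logged accidentally.
    assert creds["apikey"].get_secret_value() == "placeholder-api-key"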
|
import os
import urllib.parse
from typing import Dict, Optional, Union
from llama_index.core.base.llms.generic_utils import (
get_from_param_or_env,
)
# Import SecretStr directly from pydantic
# since there is not one in llama_index.core.bridge.pydantic
try:
from pydantic.v1 import SecretStr
except ImportError:
from pydantic import SecretStr
def resolve_watsonx_credentials(
*,
url: Optional[str] = None,
apikey: Optional[str] = None,
token: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
instance_id: Optional[str] = None
) -> Dict[str, SecretStr]:
"""
    Resolve watsonx.ai credentials. If the value of a given param is None,
    try to find the corresponding environment variable.
    :raises ValueError: raised when the value of a required attribute is not found
    :return: Dictionary with resolved credential items
:rtype: Dict[str, SecretStr]
"""
creds = {}
creds["url"] = convert_to_secret_str(
get_from_param_or_env("url", url, "WATSONX_URL")
)
parsed_url = urllib.parse.urlparse(creds["url"].get_secret_value())
if parsed_url.netloc.endswith("cloud.ibm.com"):
if not (apikey or "WATSONX_APIKEY" in os.environ) and not (
token or "WATSONX_TOKEN" in os.environ
):
raise ValueError(
"Did not find 'apikey' or 'token',"
" please add an environment variable"
" `WATSONX_APIKEY` or 'WATSONX_TOKEN' "
"which contains it,"
" or pass 'apikey' or 'token'"
" as a named parameter."
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
else:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
else:
if (
not token
and "WATSONX_TOKEN" not in os.environ
and not password
and "WATSONX_PASSWORD" not in os.environ
and not apikey
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif token or "WATSONX_TOKEN" in os.environ:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
elif password or "WATSONX_PASSWORD" in os.environ:
creds["password"] = convert_to_secret_str(
get_from_param_or_env("password", password, "WATSONX_PASSWORD")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
if not instance_id or "WATSONX_INSTANCE_ID" not in os.environ:
creds["instance_id"] = convert_to_secret_str(
get_from_param_or_env("instance_id", instance_id, "WATSONX_INSTANCE_ID")
)
return creds
def convert_to_secret_str(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
|
import pytest
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import TextDoc
from docarray.utils._internal.pydantic import is_pydantic_v2
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_doc():
class MyDoc(BaseDoc):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDoc):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema
    documentation for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
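# A hedged usage sketch: streaming chunks concatenate with `+`, merging their
# `generation_info` dicts via `merge_dicts`.
if __name__ == "__main__":
    chunk = GenerationChunk(text="Hello, ") + GenerationChunk(
        text="world!", generation_info={"finish_reason": "stop"}
    )
    assert chunk.text == "Hello, world!"
    assert chunk.generation_info == {"finish_reason": "stop"}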
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from pydantic import computed_field
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema
    documentation for more information.
"""
def __init__(
self,
text: str = "",
generation_info: Optional[dict[str, Any]] = None,
**kwargs: Any,
):
"""Initialize a Generation."""
super().__init__(generation_info=generation_info, **kwargs)
self._text = text
# workaround for ChatGeneration so that we can use a computed field to populate
# the text field from the message content (parent class needs to have a property)
@computed_field # type: ignore[prop-decorator]
@property
def text(self) -> str:
"""The text contents of the output."""
return self._text
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __init__(
self,
text: str = "",
generation_info: Optional[dict[str, Any]] = None,
**kwargs: Any,
):
"""Initialize a GenerationChunk."""
super().__init__(text=text, generation_info=generation_info, **kwargs)
self._text = text
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
|
from workflows.retry_policy import RetryPolicy, ConstantDelayRetryPolicy # noqa
|
from typing import Protocol, Optional, runtime_checkable
@runtime_checkable
class RetryPolicy(Protocol):
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
"""
        Decides whether another retry should be made, returning the number of
        seconds to wait before the next attempt.
Args:
elapsed_time: Time in seconds that passed since the last attempt.
attempts: The number of attempts done so far.
            error: The last error that occurred.
        Returns:
            The number of seconds to wait before the next attempt, or None to stop retrying.
"""
class ConstantDelayRetryPolicy:
"""A simple policy that retries a step at regular intervals for a number of times."""
def __init__(self, maximum_attempts: int = 3, delay: float = 5) -> None:
"""
Creates a ConstantDelayRetryPolicy instance.
Args:
maximum_attempts: How many consecutive times the workflow should try to run the step in case of an error.
            delay: How much time in seconds must pass before another attempt.
"""
self.maximum_attempts = maximum_attempts
self.delay = delay
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
if attempts >= self.maximum_attempts:
return None
return self.delay
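# A hedged, illustrative sketch: any object with a compatible `next` method satisfies
# the runtime-checkable RetryPolicy protocol. `ExponentialBackoffRetryPolicy` is
# hypothetical, not part of this module.
class ExponentialBackoffRetryPolicy:
    """Waits base_delay * 2**attempts seconds between retries, up to a maximum count."""

    def __init__(self, maximum_attempts: int = 3, base_delay: float = 1.0) -> None:
        self.maximum_attempts = maximum_attempts
        self.base_delay = base_delay

    def next(
        self, elapsed_time: float, attempts: int, error: Exception
    ) -> Optional[float]:
        if attempts >= self.maximum_attempts:
            return None  # stop retrying
        return self.base_delay * (2**attempts)


if __name__ == "__main__":
    assert isinstance(ExponentialBackoffRetryPolicy(), RetryPolicy)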
|
from typing import Any, Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
class CouchbaseDocumentStore(KVDocumentStore):
"""
Couchbase Document (Node) store.
    A document store for Document and Node objects using Couchbase.
"""
def __init__(
self,
couchbase_kvstore: CouchbaseKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
node_collection_suffix: Optional[str] = None,
ref_doc_collection_suffix: Optional[str] = None,
metadata_collection_suffix: Optional[str] = None,
) -> None:
"""
Initialize a CouchbaseDocumentStore.
Args:
couchbase_kvstore (CouchbaseKVStore): Couchbase key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for fetching documents
node_collection_suffix (str): suffix for the node collection
ref_doc_collection_suffix (str): suffix for the Refdoc collection
metadata_collection_suffix (str): suffix for the metadata collection
"""
super().__init__(
couchbase_kvstore,
namespace=namespace,
batch_size=batch_size,
node_collection_suffix=node_collection_suffix,
ref_doc_collection_suffix=ref_doc_collection_suffix,
metadata_collection_suffix=metadata_collection_suffix,
)
@classmethod
def from_couchbase_client(
cls,
client: Any,
bucket_name: str,
scope_name: str,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
node_collection_suffix: Optional[str] = None,
ref_doc_collection_suffix: Optional[str] = None,
metadata_collection_suffix: Optional[str] = None,
async_client: Optional[Any] = None,
) -> "CouchbaseDocumentStore":
"""Initialize a CouchbaseDocumentStore from a Couchbase client."""
couchbase_kvstore = CouchbaseKVStore.from_couchbase_client(
client=client,
bucket_name=bucket_name,
scope_name=scope_name,
async_client=async_client,
)
return cls(
couchbase_kvstore,
namespace,
batch_size,
node_collection_suffix,
ref_doc_collection_suffix,
metadata_collection_suffix,
)
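# A hedged usage sketch: wiring the docstore to a Couchbase cluster with the Python
# SDK. The connection string, credentials, bucket and scope names are placeholders.
if __name__ == "__main__":
    from couchbase.auth import PasswordAuthenticator
    from couchbase.cluster import Cluster
    from couchbase.options import ClusterOptions

    cluster = Cluster(
        "couchbase://localhost",
        ClusterOptions(PasswordAuthenticator("Administrator", "password")),
    )
    docstore = CouchbaseDocumentStore.from_couchbase_client(
        client=cluster,
        bucket_name="llama_index",
        scope_name="docstore",
    )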
|
from typing import Any, Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
class CouchbaseDocumentStore(KVDocumentStore):
"""
Couchbase Document (Node) store.
    A document store for Document and Node objects using Couchbase.
"""
def __init__(
self,
couchbase_kvstore: CouchbaseKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
node_collection_suffix: Optional[str] = None,
ref_doc_collection_suffix: Optional[str] = None,
metadata_collection_suffix: Optional[str] = None,
) -> None:
"""
Initialize a CouchbaseDocumentStore.
Args:
couchbase_kvstore (CouchbaseKVStore): Couchbase key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for fetching documents
node_collection_suffix (str): suffix for the node collection
ref_doc_collection_suffix (str): suffix for the Refdoc collection
metadata_collection_suffix (str): suffix for the metadata collection
"""
super().__init__(
couchbase_kvstore,
namespace=namespace,
batch_size=batch_size,
node_collection_suffix=node_collection_suffix,
ref_doc_collection_suffix=ref_doc_collection_suffix,
metadata_collection_suffix=metadata_collection_suffix,
)
@classmethod
def from_couchbase_client(
cls,
client: Any,
bucket_name: str,
scope_name: str,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
node_collection_suffix: Optional[str] = None,
ref_doc_collection_suffix: Optional[str] = None,
metadata_collection_suffix: Optional[str] = None,
async_client: Optional[Any] = None,
) -> "CouchbaseDocumentStore":
"""Initialize a CouchbaseDocumentStore from a Couchbase client."""
couchbase_kvstore = CouchbaseKVStore.from_couchbase_client(
client=client,
bucket_name=bucket_name,
scope_name=scope_name,
async_client=async_client,
)
return cls(
couchbase_kvstore,
namespace,
batch_size,
node_collection_suffix,
ref_doc_collection_suffix,
metadata_collection_suffix,
)
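# A hedged usage sketch: the connection string, credentials, bucket and scope
# names below are placeholders; the Cluster setup follows the Couchbase Python
# SDK 4.x pattern and should be checked against your installed SDK version.
if __name__ == "__main__":
    from couchbase.auth import PasswordAuthenticator
    from couchbase.cluster import Cluster
    from couchbase.options import ClusterOptions

    cluster = Cluster(
        "couchbase://localhost",
        ClusterOptions(PasswordAuthenticator("username", "password")),
    )
    docstore = CouchbaseDocumentStore.from_couchbase_client(
        client=cluster,
        bucket_name="llama_index",
        scope_name="docstore",
    )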
|
from base64 import b64encode
from typing import Optional
from urllib.parse import urlencode
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = ProviderName.NOTION
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = Requests().post(self.token_url, json=request_body, headers=headers)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
|
from base64 import b64encode
from typing import Optional
from urllib.parse import urlencode
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = ProviderName.NOTION
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = requests.post(self.token_url, json=request_body, headers=headers)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
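# A minimal usage sketch; the client credentials and redirect URI below are
# placeholder values, and `state` would normally be generated and tracked by
# the server's session layer.
if __name__ == "__main__":
    handler = NotionOAuthHandler(
        client_id="my-client-id",
        client_secret="my-client-secret",
        redirect_uri="https://example.com/oauth/callback",
    )
    # Notion ignores scopes, so an empty list is passed.
    print(handler.get_login_url(scopes=[], state="opaque-state-token", code_challenge=None))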
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self,
port: int = None,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
**kwargs
):
super().__init__(**kwargs)
self.port = port
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
import os
import httpx
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio()
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
def test_cohere_embeddings_custom_endpoint_multiprocessing():
"""When used in multiprocessing, the CohereEmbedding instance will be serialized and deserialized. This test
verifies, that custom base_url's are retained in the spawned processes.
"""
# Arrange: Create a CohereEmbeddings instance with a custom base_url
custom_base_url = "test_endpoint"
api_key = "test_api_key"
embeddings = CohereEmbedding(api_key=api_key, base_url=custom_base_url)
# Act: Simulate serialization and deserialization
serialized_data = embeddings.__getstate__()
deserialized_embeddings = CohereEmbedding.__new__(CohereEmbedding)
deserialized_embeddings.__setstate__(serialized_data)
# Assert: Verify that the deserialized instance retains the correct base_url
assert deserialized_embeddings.base_url == custom_base_url
|
import os
import httpx
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio()
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
|
from llama_index_instrumentation.base import BaseInstrumentationHandler # noqa
|
from abc import ABC, abstractmethod
class BaseInstrumentationHandler(ABC):
@classmethod
@abstractmethod
def init(cls) -> None:
"""Initialize the instrumentation handler."""
|
from .tensor import flush_ndarray, read_ndarray
|
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def parse_proto(pb_msg: 'DocumentProto') -> 'Document':
from docarray import Document
from docarray.score import NamedScore
fields = {}
for (field, value) in pb_msg.ListFields():
f_name = field.name
if f_name == 'chunks' or f_name == 'matches':
fields[f_name] = [Document.from_protobuf(d) for d in value]
elif isinstance(value, NdArrayProto):
fields[f_name] = read_ndarray(value)
elif isinstance(value, Struct):
fields[f_name] = MessageToDict(value, preserving_proto_field_name=True)
elif f_name == 'location':
fields[f_name] = list(value)
elif f_name == 'scores' or f_name == 'evaluations':
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(
{ff.name: vv for (ff, vv) in v.ListFields()}
)
else:
fields[f_name] = value
return Document(**fields)
def flush_proto(doc: 'Document', ndarray_type: Optional[str] = None) -> 'DocumentProto':
pb_msg = DocumentProto()
for key in doc.non_empty_fields:
try:
value = getattr(doc, key)
if key in ('tensor', 'embedding'):
flush_ndarray(getattr(pb_msg, key), value, ndarray_type=ndarray_type)
elif key in ('chunks', 'matches'):
for d in value:
d: Document
docs = getattr(pb_msg, key)
docs.append(d.to_protobuf())
elif key == 'tags':
pb_msg.tags.update(value)
elif key == '_metadata':
pb_msg._metadata.update(value)
elif key in ('scores', 'evaluations'):
for kk, vv in value.items():
for ff in vv.non_empty_fields:
setattr(getattr(pb_msg, key)[kk], ff, getattr(vv, ff))
elif key == 'location':
pb_msg.location.extend(value)
elif key == 'content':
pass # intentionally ignore `content` field as it is just a proxy
else:
# other simple fields
setattr(pb_msg, key, value)
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{key}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{key}` is problematic',) + ex.args
raise
return pb_msg
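# A hedged round-trip sketch: flushing a Document to protobuf and parsing it
# back should preserve simple fields such as `text` and `tags`.
if __name__ == "__main__":
    from docarray import Document

    doc = Document(text='hello', tags={'source': 'example'})
    restored = parse_proto(flush_proto(doc))
    assert restored.text == doc.text
    assert restored.tags['source'] == 'example'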
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
        This class implements the AnglE (Angle Optimized) loss.
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseAnglELoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionToolkit",
]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for LLaVa-Onevision."""
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import is_vision_available
from ...utils.import_utils import requires
from ...video_processing_utils import (
BaseVideoProcessor,
)
if is_vision_available():
from ...image_utils import PILImageResampling
class LlavaOnevisionFastVideoProcessorInitKwargs(VideosKwargs): ...
@requires(backends=("torchvision",))
class LlavaOnevisionVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
rescale_factor = 1 / 255
default_to_square = False
crop_size = None
do_resize = True
do_center_crop = None
do_rescale = True
do_normalize = True
do_convert_rgb = True
    do_sample_frames = False  # Set to False for backward compatibility; recommended to set `True` in new models
valid_kwargs = LlavaOnevisionFastVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos"]
def __init__(self, **kwargs: Unpack[LlavaOnevisionFastVideoProcessorInitKwargs]):
super().__init__(**kwargs)
__all__ = ["LlavaOnevisionVideoProcessor"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for LLaVa-Onevision."""
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import is_vision_available
from ...utils.import_utils import requires
from ...video_processing_utils import (
BaseVideoProcessor,
)
if is_vision_available():
from ...image_utils import PILImageResampling
class LlavaOnevisionFastVideoProcessorInitKwargs(VideosKwargs): ...
@requires(backends=("torchvision",))
class LlavaOnevisionVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
rescale_factor = 1 / 255
default_to_square = False
crop_size = None
do_resize = True
do_center_crop = None
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = LlavaOnevisionFastVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos"]
def __init__(self, **kwargs: Unpack[LlavaOnevisionFastVideoProcessorInitKwargs]):
super().__init__(**kwargs)
__all__ = ["LlavaOnevisionVideoProcessor"]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05))
|
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocArray.from_protobuf(da.to_protobuf())
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
|
# Copyright (c) OpenMMLab. All rights reserved.
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR,
LinearLR, MultiStepLR, PolyLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
ExponentialMomentum, LinearMomentum,
MultiStepMomentum, PolyMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, PolyParamScheduler,
StepParamScheduler, _ParamScheduler)
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR,
LinearLR, MultiStepLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
ExponentialMomentum, LinearMomentum,
MultiStepMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, StepParamScheduler,
_ParamScheduler)
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler'
]
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
_base_ = './cascade-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
"""Lit configuration to drive test in this repo."""
# Copyright 2020 The OpenXLA Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- Python -*-
# pylint: disable=undefined-variable
import os
import sys
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
import lit.util
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR_HLO_OPT'
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
llvm_config.with_system_environment(['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
# Adjust the PATH to correctly detect the tools on Windows
if sys.platform == 'win32':
llvm_config.config.llvm_tools_dir = r'..\llvm-project\llvm'
llvm_config.config.mlir_binary_dir = r'..\llvm-project\mlir'
llvm_config.use_default_substitutions()
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = [
config.mlir_binary_dir,
config.mlir_hlo_tools_dir,
]
tools = [
'mlir-hlo-opt',
'mlir-runner',
ToolSubst('%mlir_lib_dir', config.mlir_lib_dir, unresolved='ignore'),
]
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
"""Lit configuration to drive test in this repo."""
# Copyright 2020 The OpenXLA Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- Python -*-
# pylint: disable=undefined-variable
import os
import sys
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
import lit.util
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR_HLO_OPT'
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
llvm_config.with_system_environment(['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
# Adjust the PATH to correctly detect the tools on Windows
if sys.platform == 'win32':
llvm_config.config.llvm_tools_dir = r'..\llvm-project\llvm'
llvm_config.config.mlir_binary_dir = r'..\llvm-project\mlir'
llvm_config.use_default_substitutions()
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = [
config.mlir_binary_dir,
config.mlir_hlo_tools_dir,
]
tools = [
'mlir-hlo-opt',
'mlir-cpu-runner',
ToolSubst('%mlir_lib_dir', config.mlir_lib_dir, unresolved='ignore'),
]
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import ImageNdArray, ImageUrl
class MyImageDoc(BaseDocument):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[bytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import ImageNdArray, ImageUrl
import numpy as np
class MyImageDoc(Document):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[bytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url='https://an.image.png',
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import KerasVariable
if backend.backend() == "tensorflow":
BackendVariable = backend.tensorflow.core.Variable
backend_name_scope = backend.tensorflow.core.name_scope
elif backend.backend() == "jax":
BackendVariable = backend.jax.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "torch":
BackendVariable = backend.torch.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "numpy":
from keras.src.backend.numpy.core import Variable as NumpyVariable
BackendVariable = NumpyVariable
backend_name_scope = backend.common.name_scope.name_scope
else:
raise RuntimeError(f"Invalid backend: {backend.backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable, KerasVariable):
pass
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return backend.device_scope(device_name)
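# A minimal usage sketch: whichever backend branch was taken above, the
# exported symbols behave uniformly. The initializer value and names here are
# illustrative only.
if __name__ == "__main__":
    with name_scope("demo"):
        counter = Variable(initializer=0.0, trainable=False, name="counter")
    print(counter.name)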
|
from keras.src import backend
from keras.src.api_export import keras_export
if backend.backend() == "tensorflow":
BackendVariable = backend.tensorflow.core.Variable
backend_name_scope = backend.tensorflow.core.name_scope
elif backend.backend() == "jax":
BackendVariable = backend.jax.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "torch":
BackendVariable = backend.torch.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "numpy":
from keras.src.backend.numpy.core import Variable as NumpyVariable
BackendVariable = NumpyVariable
backend_name_scope = backend.common.name_scope.name_scope
else:
raise RuntimeError(f"Invalid backend: {backend.backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable):
pass
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return backend.device_scope(device_name)
|
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=24)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=24)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
('redis', {'n_dim': 3, 'distance': 'L2', 'flush': True}),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
"""tokenizing and encoding/decoding text using SentencePiece."""
def __init__(self, model_path: str):
"""
Initializes the Tokenizer with a SentencePiece model.
Args:
model_path (str): The path to the SentencePiece model file.
"""
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
"""
Encodes a string into a list of token IDs.
Args:
s (str): The input string to be encoded.
bos (bool): Whether to prepend the beginning-of-sequence token.
eos (bool): Whether to append the end-of-sequence token.
Returns:
List[int]: A list of token IDs.
"""
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
"""
Decodes a list of token IDs into a string.
Args:
t (List[int]): The list of token IDs to be decoded.
Returns:
str: The decoded string.
"""
return self.sp_model.decode(t)
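# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a SentencePiece model file at the hypothetical path "tokenizer.model".
if __name__ == "__main__":
    tok = Tokenizer(model_path="tokenizer.model")
    ids = tok.encode("hello world", bos=True, eos=True)
    assert ids[0] == tok.bos_id and ids[-1] == tok.eos_id
    print(tok.decode(ids))  # round-trips back to "hello world"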
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
__version__ = "2.6.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
__version__ = "2.6.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='Display interval in seconds; 0 means blocking')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in mmcv.track_iter_progress(video_reader):
result = inference_detector(model, frame)
frame = model.show_result(frame, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
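# Example invocation (hypothetical paths, shown for illustration only):
#   python video_demo.py demo.mp4 configs/faster_rcnn_r50_fpn_1x_coco.py \
#       checkpoint.pth --out result.mp4 --score-thr 0.5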
|
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='Display interval in seconds; 0 means blocking')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in mmcv.track_iter_progress(video_reader):
result = inference_detector(model, frame)
frame = model.show_result(frame, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# try to fix the fork error on macOS; it appears to have no effect unless the variable is exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.7'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
parallel-executing plot generators against the Ubuntu default `ulimit -n 1024` or the OS X El Capitan default of 256;
the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# try to fix the fork error on macOS; it appears to have no effect unless the variable is exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.6'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
parallel-executing plot generators against the Ubuntu default `ulimit -n 1024` or the OS X El Capitan default of 256;
the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import argparse
import pathlib
import re
import shutil
def main(args):
if args.scala_version == "2.12":
scala_ver = "2.12"
scala_patchver = "2.12.18"
elif args.scala_version == "2.13":
scala_ver = "2.13"
scala_patchver = "2.13.11"
else:
raise ValueError(f"Unsupported Scala version: {args.scala_version}")
# Clean artifacts
if args.purge_artifacts:
for target in pathlib.Path("jvm-packages/").glob("**/target"):
if target.is_dir():
print(f"Removing {target}...")
shutil.rmtree(target)
# Update pom.xml
for pom in pathlib.Path("jvm-packages/").glob("**/pom.xml"):
print(f"Updating {pom}...")
with open(pom, "r", encoding="utf-8") as f:
lines = f.readlines()
with open(pom, "w", encoding="utf-8") as f:
replaced_scalaver = False
replaced_scala_binver = False
for line in lines:
for artifact in [
"xgboost-jvm",
"xgboost4j",
"xgboost4j-spark",
"xgboost4j-spark-gpu",
"xgboost4j-flink",
"xgboost4j-example",
]:
line = re.sub(
f"<artifactId>{artifact}_[0-9\\.]*",
f"<artifactId>{artifact}_{scala_ver}",
line,
)
# Only replace the first occurrence of scala.version
if not replaced_scalaver:
line, nsubs = re.subn(
r"<scala.version>[0-9\.]*",
f"<scala.version>{scala_patchver}",
line,
)
if nsubs > 0:
replaced_scalaver = True
# Only replace the first occurrence of scala.binary.version
if not replaced_scala_binver:
line, nsubs = re.subn(
r"<scala.binary.version>[0-9\.]*",
f"<scala.binary.version>{scala_ver}",
line,
)
if nsubs > 0:
replaced_scala_binver = True
f.write(line)
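# Illustrative example of the rewrite performed above (hypothetical input line):
#   "<artifactId>xgboost4j_2.12</artifactId>" with scala_ver="2.13"
#   becomes "<artifactId>xgboost4j_2.13</artifactId>"; re.subn works the same
#   way for the first <scala.version> and <scala.binary.version> occurrences.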
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--purge-artifacts", action="store_true")
parser.add_argument(
"--scala-version",
type=str,
required=True,
help="Version of Scala to use in the JVM packages",
choices=["2.12", "2.13"],
)
parsed_args = parser.parse_args()
main(parsed_args)
|
import argparse
import pathlib
import re
import shutil
def main(args):
if args.scala_version == "2.12":
scala_ver = "2.12"
scala_patchver = "2.12.18"
elif args.scala_version == "2.13":
scala_ver = "2.13"
scala_patchver = "2.13.11"
else:
raise ValueError(f"Unsupported Scala version: {args.scala_version}")
# Clean artifacts
if args.purge_artifacts:
for target in pathlib.Path("jvm-packages/").glob("**/target"):
if target.is_dir():
print(f"Removing {target}...")
shutil.rmtree(target)
# Update pom.xml
for pom in pathlib.Path("jvm-packages/").glob("**/pom.xml"):
print(f"Updating {pom}...")
with open(pom, "r", encoding="utf-8") as f:
lines = f.readlines()
with open(pom, "w", encoding="utf-8") as f:
replaced_scalaver = False
replaced_scala_binver = False
for line in lines:
for artifact in [
"xgboost-jvm",
"xgboost4j",
"xgboost4j-gpu",
"xgboost4j-spark",
"xgboost4j-spark-gpu",
"xgboost4j-flink",
"xgboost4j-example",
]:
line = re.sub(
f"<artifactId>{artifact}_[0-9\\.]*",
f"<artifactId>{artifact}_{scala_ver}",
line,
)
# Only replace the first occurrence of scala.version
if not replaced_scalaver:
line, nsubs = re.subn(
r"<scala.version>[0-9\.]*",
f"<scala.version>{scala_patchver}",
line,
)
if nsubs > 0:
replaced_scalaver = True
# Only replace the first occurrence of scala.binary.version
if not replaced_scala_binver:
line, nsubs = re.subn(
r"<scala.binary.version>[0-9\.]*",
f"<scala.binary.version>{scala_ver}",
line,
)
if nsubs > 0:
replaced_scala_binver = True
f.write(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--purge-artifacts", action="store_true")
parser.add_argument(
"--scala-version",
type=str,
required=True,
help="Version of Scala to use in the JVM packages",
choices=["2.12", "2.13"],
)
parsed_args = parser.parse_args()
main(parsed_args)
|
import asyncio
from typing import Any, AsyncGenerator, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[Event]:
"""Runs the next workflow step and returns the output Event.
If the return value is None, the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx and not self.ctx.stepwise:
raise ValueError("Stepwise context is required to run stepwise.")
if self.ctx:
try:
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx._step_event_holding
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
else:
raise ValueError("Context is not set!")
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
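# --- Usage sketch (illustrative, not part of the original module) ---
# Consume streamed events until the StopEvent arrives, then await the handler
# for the final result; `handler` is assumed to come from `workflow.run()`.
async def _demo_stream(handler: "WorkflowHandler") -> None:
    async for ev in handler.stream_events():
        print(type(ev).__name__)
    print(await handler)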
|
import asyncio
from typing import Any, AsyncGenerator, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[Event]:
"""Runs the next workflow step and returns the output Event.
If the return value is None, the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx and not self.ctx.stepwise:
raise ValueError("Stepwise context is required to run stepwise.")
if self.ctx:
try:
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx._step_event_holding
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
else:
raise ValueError("Context is not set!")
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
port_monitoring = random_port()
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(
port=ports, protocol=protocols, monitoring=True, port_monitoring=port_monitoring
)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
resp = req.get(f'http://localhost:{port_monitoring}/')
assert resp.status_code == 200
assert (
'jina_successful_requests_total{runtime_name="gateway/rep-0"} 3.0'
in str(resp.content)
)
def test_flow_multiprotocol_with_tracing():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, tracing=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, monitoring=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
|
from typing import Optional
from typing_extensions import Protocol, runtime_checkable
from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
__all__ = ["AsyncStager", "BlockingAsyncStager"]
@runtime_checkable
class AsyncStager(Protocol):
"""
This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
to customize how data is staged prior to executing the usual dcp.save path in parallel.
The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
is the following:
1. AsyncStager.stage(state_dict):
This call gives the AsyncStager the opportunity to 'stage'
the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
representation of the state dict, meaning that any updates to module data after staging is complete
should not be reflected in the state dict returned from this method. For example, in the default
case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
to continue training without risking changes to data which is being serialized.
2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
for serializing the state_dict and writing it to storage.
3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
the serialization thread starts and before returning from dcp.async_save. If this is set to False,
the assumption is that the user has defined a custom synchronization point for the purpose of further
optimizing save latency in the training loop (for example, by overlapping staging with the
forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
at the appropriate time.
"""
# default to True since the common case is to stage synchronously
_synchronize_after_execute: bool = True
@property
def should_synchronize_after_execute(self) -> bool:
"""
Whether to synchronize after executing the stage.
"""
return self._synchronize_after_execute
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
inoculated from any updates incurred after the stage call is complete.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement stage method"
)
def synchronize_staging(self) -> None:
"""
In the case `stage` is async in some way, this method should be called to ensure staging
is complete and it is safe to begin modifying the original `state_dict`.
"""
class BlockingAsyncStager(AsyncStager):
"""
An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
This implementation also provides an option to optimize stage latency using pinned memory.
N.B. synchronize_staging is a no-op in this case.
"""
# staging blocks until the copy completes, so there is no need to synchronize after execute
_synchronize_after_execute: bool = False
def __init__(
self,
cache_staged_state_dict: bool = False,
type_check: bool = False,
):
"""
Initializes the BlockingAsyncStager.
Args:
cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
at the cost of increased memory usage. Additionally, if this parameter is set to True, the stager is
expected to be maintained and reused for multiple dcp.async_save calls. Defaults to False.
type_check: Whether to perform a type check during cpu_offload. Defaults to False.
"""
self.cache_staged_state_dict = cache_staged_state_dict
self.type_check = type_check
self.state_dict_cache: Optional[STATE_DICT_TYPE] = None
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a copy of `state_dict` on the CPU.
"""
if not self.cache_staged_state_dict:
staged_state_dict = _create_cpu_state_dict(state_dict)
_copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)
return staged_state_dict
if self.state_dict_cache is None:
self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
return _copy_state_dict(state_dict, self.state_dict_cache)
def synchronize_staging(self) -> None:
"""
No-op function, since staging is blocking.
"""
|
from typing import Optional
from typing_extensions import Protocol, runtime_checkable
from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
__all__ = ["AsyncStager", "BlockingAsyncStager"]
@runtime_checkable
class AsyncStager(Protocol):
"""
This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
to customize how data is staged prior to executing the usual dcp.save path in parallel.
The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
is the following:
1. AsyncStager.stage(state_dict):
This call gives the AsyncStager the opportunity to 'stage'
the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
representation of the state dict, meaning that any updates to module data after staging is complete
should not be reflected in the state dict returned from this method. For example, in the default
case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
to continue training without risking changes to data which is being serialized.
2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
for serializing the state_dict and writing it to storage.
3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
the serialization thread starts and before returning from dcp.async_save. If this is set to False,
the assumption is that the user has defined a custom synchronization point for the purpose of further
optimizing save latency in the training loop (for example, by overlapping staging with the
forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
at the appropriate time.
"""
# default to True since the common case is to stage synchronously
_synchronize_after_execute: bool = True
@property
def should_synchronize_after_execute(self) -> bool:
"""
Whether to synchronize after executing the stage.
"""
return self._synchronize_after_execute
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
inoculated from any updates incurred after the stage call is complete.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement stage method"
)
def synchronize_staging(self) -> None:
"""
In the case `stage` is async in some way, this method should be called to ensure staging
is complete and it is safe to begin modifying the original `state_dict`.
"""
class BlockingAsyncStager(AsyncStager):
"""
An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
This implementation also provides an option to optimize stage latency using pinned memory.
N.B. synchronize_staging is a no-op in this case.
"""
# staging blocks until the copy completes, so there is no need to synchronize after execute
_synchronize_after_execute: bool = False
def __init__(
self,
cache_staged_state_dict: bool = False,
type_check: bool = False,
):
"""
Initializes the BlockingAsyncStager.
Args:
cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
at the cost of increased memory usage. Additionally, if this parameter is set to True, the stager is
expected to be maintained and reused for multiple dcp.async_save calls. Defaults to False.
type_check: Whether to perform a type check during cpu_offload. Defaults to False.
"""
self.cache_staged_state_dict = cache_staged_state_dict
self.type_check = type_check
self.state_dict_cache: Optional[STATE_DICT_TYPE] = None
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a copy of `state_dict` on the CPU.
"""
if not self.cache_staged_state_dict:
staged_state_dict = _create_cpu_state_dict(state_dict)
_copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)
return staged_state_dict
if self.state_dict_cache is None:
self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
return _copy_state_dict(state_dict, self.state_dict_cache)
def synchronize_staging(self) -> None:
"""
No-op function, since staging is blocking.
"""
|
# dataset settings
dataset_type = 'CityscapesDataset'
# TODO remove it after cityscape metric
# data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'])
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'])
# TODO add setting on test dataset after cityscape metric
# inference on test dataset and
# format the output results for submission.
# test_dataloader = None
# test_evaluator = None
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_train.json',
img_prefix=data_root + 'leftImg8bit/train/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'leftImg8bit/val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_test.json',
img_prefix=data_root + 'leftImg8bit/test/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
"""Implementations of key-value stores and storage helpers.
This module provides implementations of various key-value stores that conform
to a simple key-value interface.
The primary goal of these storages is to support implementation of caching.
"""
from typing import TYPE_CHECKING, Any
from langchain_core.stores import (
InMemoryByteStore,
InMemoryStore,
InvalidKeyException,
)
from langchain._api import create_importer
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.encoder_backed import EncoderBackedStore
from langchain.storage.file_system import LocalFileStore
if TYPE_CHECKING:
from langchain_community.storage import (
RedisStore,
UpstashRedisByteStore,
UpstashRedisStore,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisStore": "langchain_community.storage",
"UpstashRedisByteStore": "langchain_community.storage",
"UpstashRedisStore": "langchain_community.storage",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"EncoderBackedStore",
"InMemoryByteStore",
"InMemoryStore",
"InvalidKeyException",
"LocalFileStore",
"RedisStore",
"UpstashRedisByteStore",
"UpstashRedisStore",
"create_kv_docstore",
"create_lc_store",
]
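# Usage sketch: the stores exported above share the same interface
# (mset/mget/mdelete/yield_keys); InMemoryStore is the simplest to try.
from langchain_core.stores import InMemoryStore

store = InMemoryStore()
store.mset([("user:1", {"name": "Ada"}), ("user:2", {"name": "Grace"})])
print(store.mget(["user:1", "user:2"]))  # [{'name': 'Ada'}, {'name': 'Grace'}]
store.mdelete(["user:1"])
print(list(store.yield_keys()))  # ['user:2']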
|
"""Implementations of key-value stores and storage helpers.
This module provides implementations of various key-value stores that conform
to a simple key-value interface.
The primary goal of these stores is to support caching.
"""
from typing import TYPE_CHECKING, Any
from langchain_core.stores import (
InMemoryByteStore,
InMemoryStore,
InvalidKeyException,
)
from langchain._api import create_importer
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.encoder_backed import EncoderBackedStore
from langchain.storage.file_system import LocalFileStore
if TYPE_CHECKING:
from langchain_community.storage import (
RedisStore,
UpstashRedisByteStore,
UpstashRedisStore,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisStore": "langchain_community.storage",
"UpstashRedisByteStore": "langchain_community.storage",
"UpstashRedisStore": "langchain_community.storage",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_kv_docstore",
"create_lc_store",
"EncoderBackedStore",
"InMemoryByteStore",
"InMemoryStore",
"InvalidKeyException",
"LocalFileStore",
"RedisStore",
"UpstashRedisByteStore",
"UpstashRedisStore",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import MMLogger
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.utils import mkdir_or_exist
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument(
'--task',
choices=['inference', 'dataloader', 'dataset'],
default='dataloader',
        help='which task to benchmark')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
        help='number of times to repeat the measurement when averaging results')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='maximum number of iterations')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='logging interval')
    parser.add_argument(
        '--num-warmup', type=int, default=5, help='number of warmup iterations')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--dataset-type',
choices=['train', 'val', 'test'],
default='test',
        help='benchmark dataset type; only train, val and test are supported')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def inference_benchmark(args, cfg, distributed, logger):
benchmark = InferenceBenchmark(
cfg,
args.checkpoint,
distributed,
args.fuse_conv_bn,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataloader_benchmark(args, cfg, distributed, logger):
benchmark = DataLoaderBenchmark(
cfg,
distributed,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataset_benchmark(args, cfg, distributed, logger):
benchmark = DatasetBenchmark(
cfg,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def main():
register_all_modules()
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mkdir_or_exist(args.work_dir)
logger = MMLogger.get_instance(
'mmdet', log_file=log_file, log_level='INFO')
    # Dispatch on the task name explicitly instead of using eval().
    benchmark_fns = {
        'inference': inference_benchmark,
        'dataloader': dataloader_benchmark,
        'dataset': dataset_benchmark,
    }
    benchmark = benchmark_fns[args.task](args, cfg, distributed, logger)
benchmark.run(args.repeat_num)
if __name__ == '__main__':
main()
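# Sketch of the config handling main() performs above; the config path is a
# hypothetical placeholder.
from mmengine.config import Config

cfg = Config.fromfile('configs/example.py')  # same call as in main()
cfg.merge_from_dict({'env_cfg.dist_cfg.backend': 'nccl'})  # what --cfg-options does
print(cfg.get('env_cfg', {}).get('dist_cfg', {}))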
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from mmcv import Config, DictAction
from mmengine import MMLogger
from mmengine.dist import init_dist
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument(
'--task',
choices=['inference', 'dataloader', 'dataset'],
default='dataloader',
        help='which task to benchmark')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
        help='number of times to repeat the measurement when averaging results')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='maximum number of iterations')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='logging interval')
    parser.add_argument(
        '--num-warmup', type=int, default=5, help='number of warmup iterations')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--dataset-type',
choices=['train', 'val', 'test'],
default='test',
        help='benchmark dataset type; only train, val and test are supported')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def inference_benchmark(args, cfg, distributed, logger):
benchmark = InferenceBenchmark(
cfg,
args.checkpoint,
distributed,
args.fuse_conv_bn,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataloader_benchmark(args, cfg, distributed, logger):
benchmark = DataLoaderBenchmark(
cfg,
distributed,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataset_benchmark(args, cfg, distributed, logger):
benchmark = DatasetBenchmark(
cfg,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def main():
register_all_modules()
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mmcv.mkdir_or_exist(args.work_dir)
logger = MMLogger.get_instance(
'mmdet', log_file=log_file, log_level='INFO')
    # Dispatch on the task name explicitly instead of using eval().
    benchmark_fns = {
        'inference': inference_benchmark,
        'dataloader': dataloader_benchmark,
        'dataset': dataset_benchmark,
    }
    benchmark = benchmark_fns[args.task](args, cfg, distributed, logger)
benchmark.run(args.repeat_num)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .cspnext import CSPNeXt
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer',
'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer',
'PyramidVisionTransformerV2', 'EfficientNet'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
import os
from typing import Optional, Type
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Access the nested Python class defined in the schema. Can be useful for
        reconstructing a Document during serialization/deserialization.
        :param field: name of the field
        :return: the outer type of the field
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self):
"""Displays the object in IPython as a summary"""
self.summary()
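# Usage sketch: BaseDocument is a pydantic model, so subclasses just declare
# typed fields; the id field above is auto-populated via the default_factory.
from typing import Optional

class TextDoc(BaseDocument):
    text: str
    score: Optional[float] = None

doc = TextDoc(text='hello world')
print(doc.id)  # 32-char hex id generated from os.urandom
doc.summary()  # rich-printed summary of non-empty fields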
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Access the nested Python class defined in the schema. Can be useful for
        reconstructing a Document during serialization/deserialization.
        :param field: name of the field
        :return: the outer type of the field
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self):
"""Displays the object in IPython as a summary"""
self.summary()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
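# Sketch of the surface re-exported above: Input, layers and Model compose
# into the usual functional-API graph.
import keras

inputs = keras.Input(shape=(4,))
outputs = keras.layers.Dense(2, activation='softmax')(inputs)
model = keras.Model(inputs, outputs)
model.summary()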
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
]
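# Usage sketch for the source-separation bundle exported above; get_model()
# downloads pretrained weights on first use.
import torch
from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX

bundle = CONVTASNET_BASE_LIBRI2MIX
model = bundle.get_model()
mixture = torch.randn(1, 1, bundle.sample_rate)  # 1 second of fake audio
sources = model(mixture)  # (batch, num_sources, time)
print(sources.shape)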
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class CashFlowStatementsSchema(BaseModel):
"""Input for CashFlowStatements."""
ticker: str = Field(
description="The ticker symbol to fetch cash flow statements for.",
)
period: str = Field(
description="The period of the cash flow statement. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of cash flow statements to return. Default is 10.",
)
class CashFlowStatements(BaseTool):
"""
Tool that gets cash flow statements for a given ticker over a given period.
"""
mode: str = "get_cash_flow_statements"
name: str = "cash_flow_statements"
description: str = (
"A wrapper around financial datasets's Cash Flow Statements API. "
"This tool is useful for fetching cash flow statements for a given ticker."
"The tool fetches cash flow statements for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of cash flow statements to return can also be "
"specified using the limit parameter."
)
args_schema: Type[CashFlowStatementsSchema] = CashFlowStatementsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Cash Flow Statements API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
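# Usage sketch; the wrapper keyword and the API key are assumptions, check the
# FinancialDatasetsAPIWrapper docs for the real constructor arguments.
wrapper = FinancialDatasetsAPIWrapper(
    financial_datasets_api_key='fd-...',  # hypothetical placeholder key
)
tool = CashFlowStatements(api_wrapper=wrapper)
print(tool.invoke({'ticker': 'AAPL', 'period': 'annual', 'limit': 5}))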
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class CashFlowStatementsSchema(BaseModel):
"""Input for CashFlowStatements."""
ticker: str = Field(
description="The ticker symbol to fetch cash flow statements for.",
)
period: str = Field(
description="The period of the cash flow statement. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of cash flow statements to return. Default is 10.",
)
class CashFlowStatements(BaseTool):  # type: ignore[override]
"""
Tool that gets cash flow statements for a given ticker over a given period.
"""
mode: str = "get_cash_flow_statements"
name: str = "cash_flow_statements"
description: str = (
"A wrapper around financial datasets's Cash Flow Statements API. "
"This tool is useful for fetching cash flow statements for a given ticker."
"The tool fetches cash flow statements for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of cash flow statements to return can also be "
"specified using the limit parameter."
)
args_schema: Type[CashFlowStatementsSchema] = CashFlowStatementsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Cash Flow Statements API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
contrastive_cfg=dict(max_text_len=256),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities',
'tokens_positive'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
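# Sketch of building the model from the dict-style config above via the
# mmdet 3.x registry; the config path is a hypothetical placeholder.
from mmengine.config import Config
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules

register_all_modules()
cfg = Config.fromfile('configs/grounding_dino/grounding_dino_swin-t.py')
model = MODELS.build(cfg.model)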
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
contrastive_cfg=dict(max_text_len=256),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
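# Usage sketch composing the v2 transforms exported above.
import torch
from torchvision.transforms import v2

transform = v2.Compose([
    v2.Resize((256, 256)),
    v2.RandomHorizontalFlip(p=0.5),
    v2.ToDtype(torch.float32, scale=True),  # uint8 [0, 255] -> float [0, 1]
])
img = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
print(transform(img).shape)  # torch.Size([3, 256, 256])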
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
"""Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
class EvaluationResult(BaseModel):
"""
Evaluation result.
    Output of a BaseEvaluator.
"""
query: Optional[str] = Field(default=None, description="Query string")
contexts: Optional[Sequence[str]] = Field(
default=None, description="Context strings"
)
response: Optional[str] = Field(default=None, description="Response string")
passing: Optional[bool] = Field(
default=None, description="Binary evaluation result (passing or not)"
)
feedback: Optional[str] = Field(
default=None, description="Feedback or reasoning for the response"
)
score: Optional[float] = Field(default=None, description="Score for the response")
pairwise_source: Optional[str] = Field(
default=None,
description=(
"Used only for pairwise and specifies whether it is from original order of"
" presented answers or flipped order"
),
)
invalid_result: bool = Field(
default=False, description="Whether the evaluation result is an invalid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason for invalid evaluation."
)
class BaseEvaluator(PromptMixin):
"""Base Evaluator class."""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def evaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
return asyncio_run(
self.aevaluate(
query=query,
response=response,
contexts=contexts,
**kwargs,
)
)
@abstractmethod
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
raise NotImplementedError
def evaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return self.evaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
async def aevaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return await self.aevaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
# legacy: backward compatibility
Evaluation = EvaluationResult
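# Sketch of a concrete evaluator; _get_prompts/_update_prompts are stubbed here
# on the assumption that PromptMixin declares them abstract.
class NonEmptyEvaluator(BaseEvaluator):
    def _get_prompts(self):
        return {}

    def _update_prompts(self, prompts_dict):
        pass

    async def aevaluate(self, query=None, response=None, contexts=None, **kwargs):
        ok = bool(response and response.strip())
        return EvaluationResult(query=query, response=response, passing=ok,
                                score=1.0 if ok else 0.0)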
|
"""Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
class EvaluationResult(BaseModel):
"""
Evaluation result.
    Output of a BaseEvaluator.
"""
query: Optional[str] = Field(default=None, description="Query string")
contexts: Optional[Sequence[str]] = Field(
default=None, description="Context strings"
)
response: Optional[str] = Field(default=None, description="Response string")
passing: Optional[bool] = Field(
default=None, description="Binary evaluation result (passing or not)"
)
feedback: Optional[str] = Field(
default=None, description="Feedback or reasoning for the response"
)
score: Optional[float] = Field(default=None, description="Score for the response")
pairwise_source: Optional[str] = Field(
default=None,
description=(
"Used only for pairwise and specifies whether it is from original order of"
" presented answers or flipped order"
),
)
invalid_result: bool = Field(
default=False, description="Whether the evaluation result is an invalid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason for invalid evaluation."
)
class BaseEvaluator(PromptMixin):
"""Base Evaluator class."""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def evaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
return asyncio_run(
self.aevaluate(
query=query,
response=response,
contexts=contexts,
**kwargs,
)
)
@abstractmethod
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
raise NotImplementedError
def evaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return self.evaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
async def aevaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return await self.aevaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
# legacy: backward compatibility
Evaluation = EvaluationResult
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
roi_head=dict(
mask_head=dict(
upsample_cfg=dict(
type='carafe',
scale_factor=2,
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64))))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
roi_head=dict(
mask_head=dict(
upsample_cfg=dict(
type='carafe',
scale_factor=2,
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64))))
|